Posted to common-commits@hadoop.apache.org by to...@apache.org on 2022/11/04 06:20:56 UTC

[hadoop] branch revert-5068-HDFS-16811 created (now 2c84d339df5)

This is an automated email from the ASF dual-hosted git repository.

tomscut pushed a change to branch revert-5068-HDFS-16811
in repository https://gitbox.apache.org/repos/asf/hadoop.git


      at 2c84d339df5 Revert "HDFS-16811. Support DecommissionBackoffMonitor Parameters reconfigurable (#5068)"

This branch includes the following new commits:

     new 2c84d339df5 Revert "HDFS-16811. Support DecommissionBackoffMonitor Parameters reconfigurable (#5068)"

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email. Revisions
listed as "add" were already present in the repository and have only
been added to this reference.




[hadoop] 01/01: Revert "HDFS-16811. Support DecommissionBackoffMonitor Parameters reconfigurable (#5068)"

Posted by to...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tomscut pushed a commit to branch revert-5068-HDFS-16811
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 2c84d339df5983705cb16dce801aab10a48e9aab
Author: litao <to...@gmail.com>
AuthorDate: Fri Nov 4 14:20:50 2022 +0800

    Revert "HDFS-16811. Support DecommissionBackoffMonitor Parameters reconfigurablereconfigurable (#5068)"
    
    This reverts commit e9319e696c9b9b3aa63b2f3b7213b60423b328c3.
---
 .../DatanodeAdminBackoffMonitor.java               | 27 +------
 .../DatanodeAdminDefaultMonitor.java               | 23 ------
 .../blockmanagement/DatanodeAdminManager.java      | 26 -------
 .../DatanodeAdminMonitorInterface.java             |  8 --
 .../hadoop/hdfs/server/namenode/NameNode.java      | 40 +---------
 .../server/namenode/TestNameNodeReconfigure.java   | 85 ----------------------
 .../org/apache/hadoop/hdfs/tools/TestDFSAdmin.java | 10 +--
 7 files changed, 7 insertions(+), 212 deletions(-)
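
For context, the reverted change (#5068) had made the backoff monitor's
pending-replication limit and blocks-per-lock count tunable on a running
NameNode through its reconfiguration API. A minimal sketch of that usage,
adapted from the TestNameNodeReconfigure test removed below (it assumes the
reverted commit e9319e6 is applied, and uses the same static imports from
DFSConfigKeys that appear in the diff):

    // Sketch only: enable the backoff monitor, then retune its limits at runtime.
    Configuration conf = new HdfsConfiguration();
    conf.setClass(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MONITOR_CLASS,
        DatanodeAdminBackoffMonitor.class, DatanodeAdminMonitorInterface.class);
    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build()) {
      cluster.waitActive();
      NameNode nn = cluster.getNameNode();
      // Both keys accept only positive integers; non-numeric or <= 0 values
      // are rejected with a ReconfigurationException instead of being applied.
      nn.reconfigureProperty(
          DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT, "20000");
      nn.reconfigureProperty(
          DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK, "10000");
    }
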

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminBackoffMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminBackoffMonitor.java
index 79d5a065b08..a7d72d019bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminBackoffMonitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminBackoffMonitor.java
@@ -24,7 +24,6 @@ import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
 import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
-import org.apache.hadoop.classification.VisibleForTesting;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import java.util.HashMap;
@@ -71,10 +70,10 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
       outOfServiceNodeBlocks = new HashMap<>();
 
   /**
-   * The number of blocks to process when moving blocks to pendingReplication
+   * The numbe of blocks to process when moving blocks to pendingReplication
    * before releasing and reclaiming the namenode lock.
    */
-  private volatile int blocksPerLock;
+  private int blocksPerLock;
 
   /**
    * The number of blocks that have been checked on this tick.
@@ -83,7 +82,7 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
   /**
    * The maximum number of blocks to hold in PendingRep at any time.
    */
-  private volatile int pendingRepLimit;
+  private int pendingRepLimit;
 
   /**
    * The list of blocks which have been placed onto the replication queue
@@ -802,26 +801,6 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
     return false;
   }
 
-  @VisibleForTesting
-  @Override
-  public int getPendingRepLimit() {
-    return pendingRepLimit;
-  }
-
-  public void setPendingRepLimit(int pendingRepLimit) {
-    this.pendingRepLimit = pendingRepLimit;
-  }
-
-  @VisibleForTesting
-  @Override
-  public int getBlocksPerLock() {
-    return blocksPerLock;
-  }
-
-  public void setBlocksPerLock(int blocksPerLock) {
-    this.blocksPerLock = blocksPerLock;
-  }
-
   static class BlockStats {
     private LightWeightHashSet<Long> openFiles =
         new LightWeightLinkedSet<>();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java
index a75fc5d6e35..e642dfba351 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.hdfs.util.CyclicIteration;
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
 import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
 import org.apache.hadoop.util.ChunkedArrayList;
-import org.apache.hadoop.classification.VisibleForTesting;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -138,28 +137,6 @@ public class DatanodeAdminDefaultMonitor extends DatanodeAdminMonitorBase
     return numNodesChecked;
   }
 
-  @VisibleForTesting
-  @Override
-  public int getPendingRepLimit() {
-    return 0;
-  }
-
-  @Override
-  public void setPendingRepLimit(int pendingRepLimit) {
-    // nothing.
-  }
-
-  @VisibleForTesting
-  @Override
-  public int getBlocksPerLock() {
-    return 0;
-  }
-
-  @Override
-  public void setBlocksPerLock(int blocksPerLock) {
-    // nothing.
-  }
-
   @Override
   public void run() {
     LOG.debug("DatanodeAdminMonitor is running.");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java
index 92966f7fe45..887cb1072d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java
@@ -419,30 +419,4 @@ public class DatanodeAdminManager {
     executor.submit(monitor).get();
   }
 
-  public void refreshPendingRepLimit(int pendingRepLimit, String key) {
-    ensurePositiveInt(pendingRepLimit, key);
-    this.monitor.setPendingRepLimit(pendingRepLimit);
-  }
-
-  @VisibleForTesting
-  public int getPendingRepLimit() {
-    return this.monitor.getPendingRepLimit();
-  }
-
-  public void refreshBlocksPerLock(int blocksPerLock, String key) {
-    ensurePositiveInt(blocksPerLock, key);
-    this.monitor.setBlocksPerLock(blocksPerLock);
-  }
-
-  @VisibleForTesting
-  public int getBlocksPerLock() {
-    return this.monitor.getBlocksPerLock();
-  }
-
-  private void ensurePositiveInt(int val, String key) {
-    Preconditions.checkArgument(
-        (val > 0),
-        key + " = '" + val + "' is invalid. " +
-            "It should be a positive, non-zero integer value.");
-  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminMonitorInterface.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminMonitorInterface.java
index a4774742108..89673a759ea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminMonitorInterface.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminMonitorInterface.java
@@ -37,12 +37,4 @@ public interface DatanodeAdminMonitorInterface extends Runnable {
   void setBlockManager(BlockManager bm);
   void setDatanodeAdminManager(DatanodeAdminManager dnm);
   void setNameSystem(Namesystem ns);
-
-  int getPendingRepLimit();
-
-  void setPendingRepLimit(int pendingRepLimit);
-
-  int getBlocksPerLock();
-
-  void setBlocksPerLock(int blocksPerLock);
 }
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 32ff45b2bfd..3d3b65d8e21 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -205,10 +205,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_SLOWPEER_COL
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK_DEFAULT;
 
 import static org.apache.hadoop.util.ExitUtil.terminate;
 import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
@@ -357,9 +353,7 @@ public class NameNode extends ReconfigurableBase implements
           DFS_BLOCK_INVALIDATE_LIMIT_KEY,
           DFS_DATANODE_PEER_STATS_ENABLED_KEY,
           DFS_DATANODE_MAX_NODES_TO_REPORT_KEY,
-          DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY,
-          DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT,
-          DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK));
+          DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY));
 
   private static final String USAGE = "Usage: hdfs namenode ["
       + StartupOption.BACKUP.getName() + "] | \n\t["
@@ -2327,10 +2321,6 @@ public class NameNode extends ReconfigurableBase implements
       return reconfigureSlowNodesParameters(datanodeManager, property, newVal);
     } else if (property.equals(DFS_BLOCK_INVALIDATE_LIMIT_KEY)) {
       return reconfigureBlockInvalidateLimit(datanodeManager, property, newVal);
-    } else if (property.equals(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT) ||
-        (property.equals(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK))) {
-      return reconfigureDecommissionBackoffMonitorParameters(datanodeManager, property,
-          newVal);
     } else {
       throw new ReconfigurationException(property, newVal, getConf().get(
           property));
@@ -2611,34 +2601,6 @@ public class NameNode extends ReconfigurableBase implements
     }
   }
 
-  private String reconfigureDecommissionBackoffMonitorParameters(
-      final DatanodeManager datanodeManager, final String property, final String newVal)
-      throws ReconfigurationException {
-    String newSetting = null;
-    try {
-      if (property.equals(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT)) {
-        int pendingRepLimit = (newVal == null ?
-            DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT_DEFAULT :
-            Integer.parseInt(newVal));
-        datanodeManager.getDatanodeAdminManager().refreshPendingRepLimit(pendingRepLimit,
-            DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT);
-        newSetting = String.valueOf(datanodeManager.getDatanodeAdminManager().getPendingRepLimit());
-      } else if (property.equals(
-          DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK)) {
-        int blocksPerLock = (newVal == null ?
-            DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK_DEFAULT :
-            Integer.parseInt(newVal));
-        datanodeManager.getDatanodeAdminManager().refreshBlocksPerLock(blocksPerLock,
-            DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK);
-        newSetting = String.valueOf(datanodeManager.getDatanodeAdminManager().getBlocksPerLock());
-      }
-      LOG.info("RECONFIGURE* changed reconfigureDecommissionBackoffMonitorParameters {} to {}",
-          property, newSetting);
-      return newSetting;
-    } catch (IllegalArgumentException e) {
-      throw new ReconfigurationException(property, newVal, getConf().get(property), e);
-    }
-  }
 
   @Override  // ReconfigurableBase
   protected Configuration getNewConf() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
index 5573b1fa107..d0484298146 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
@@ -22,8 +22,6 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeAdminBackoffMonitor;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeAdminMonitorInterface;
 import org.junit.Test;
 import org.junit.Before;
 import org.junit.After;
@@ -64,8 +62,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KE
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AVOID_SLOW_DATANODE_FOR_READ_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_BACKOFF_ENABLE_DEFAULT;
 
 public class TestNameNodeReconfigure {
@@ -571,87 +567,6 @@ public class TestNameNodeReconfigure {
     return containReport;
   }
 
-  @Test
-  public void testReconfigureDecommissionBackoffMonitorParameters()
-      throws ReconfigurationException, IOException {
-    Configuration conf = new HdfsConfiguration();
-    conf.setClass(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MONITOR_CLASS,
-        DatanodeAdminBackoffMonitor.class, DatanodeAdminMonitorInterface.class);
-    int defaultPendingRepLimit = 1000;
-    conf.setInt(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT, defaultPendingRepLimit);
-    int defaultBlocksPerLock = 1000;
-    conf.setInt(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK,
-        defaultBlocksPerLock);
-
-    try (MiniDFSCluster newCluster = new MiniDFSCluster.Builder(conf).build()) {
-      newCluster.waitActive();
-      final NameNode nameNode = newCluster.getNameNode();
-      final DatanodeManager datanodeManager = nameNode.namesystem
-          .getBlockManager().getDatanodeManager();
-
-      // verify defaultPendingRepLimit.
-      assertEquals(datanodeManager.getDatanodeAdminManager().getPendingRepLimit(),
-          defaultPendingRepLimit);
-
-      // try invalid pendingRepLimit.
-      try {
-        nameNode.reconfigureProperty(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT,
-            "non-numeric");
-        fail("Should not reach here");
-      } catch (ReconfigurationException e) {
-        assertEquals("Could not change property " +
-            "dfs.namenode.decommission.backoff.monitor.pending.limit from '" +
-            defaultPendingRepLimit + "' to 'non-numeric'", e.getMessage());
-      }
-
-      try {
-        nameNode.reconfigureProperty(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT,
-            "-1");
-        fail("Should not reach here");
-      } catch (ReconfigurationException e) {
-        assertEquals("Could not change property " +
-            "dfs.namenode.decommission.backoff.monitor.pending.limit from '" +
-            defaultPendingRepLimit + "' to '-1'", e.getMessage());
-      }
-
-      // try correct pendingRepLimit.
-      nameNode.reconfigureProperty(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT,
-          "20000");
-      assertEquals(datanodeManager.getDatanodeAdminManager().getPendingRepLimit(), 20000);
-
-      // verify defaultBlocksPerLock.
-      assertEquals(datanodeManager.getDatanodeAdminManager().getBlocksPerLock(),
-          defaultBlocksPerLock);
-
-      // try invalid blocksPerLock.
-      try {
-        nameNode.reconfigureProperty(
-            DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK,
-            "non-numeric");
-        fail("Should not reach here");
-      } catch (ReconfigurationException e) {
-        assertEquals("Could not change property " +
-            "dfs.namenode.decommission.backoff.monitor.pending.blocks.per.lock from '" +
-            defaultBlocksPerLock + "' to 'non-numeric'", e.getMessage());
-      }
-
-      try {
-        nameNode.reconfigureProperty(
-            DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK, "-1");
-        fail("Should not reach here");
-      } catch (ReconfigurationException e) {
-        assertEquals("Could not change property " +
-            "dfs.namenode.decommission.backoff.monitor.pending.blocks.per.lock from '" +
-            defaultBlocksPerLock + "' to '-1'", e.getMessage());
-      }
-
-      // try correct blocksPerLock.
-      nameNode.reconfigureProperty(
-          DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK, "10000");
-      assertEquals(datanodeManager.getDatanodeAdminManager().getBlocksPerLock(), 10000);
-    }
-  }
-
   @After
   public void shutDown() throws IOException {
     if (cluster != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index 59491206dcb..99e4b348f61 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -43,8 +43,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_PLACEMENT_EC_CLASSN
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AVOID_SLOW_DATANODE_FOR_READ_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK;
 
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.text.TextStringBuilder;
@@ -440,7 +438,7 @@ public class TestDFSAdmin {
     final List<String> outs = Lists.newArrayList();
     final List<String> errs = Lists.newArrayList();
     getReconfigurableProperties("namenode", address, outs, errs);
-    assertEquals(22, outs.size());
+    assertEquals(20, outs.size());
     assertTrue(outs.get(0).contains("Reconfigurable properties:"));
     assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY, outs.get(1));
     assertEquals(DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY, outs.get(2));
@@ -451,10 +449,8 @@ public class TestDFSAdmin {
     assertEquals(DFS_IMAGE_PARALLEL_LOAD_KEY, outs.get(7));
     assertEquals(DFS_NAMENODE_AVOID_SLOW_DATANODE_FOR_READ_KEY, outs.get(8));
     assertEquals(DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY, outs.get(9));
-    assertEquals(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK, outs.get(10));
-    assertEquals(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT, outs.get(11));
-    assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, outs.get(12));
-    assertEquals(DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY, outs.get(13));
+    assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, outs.get(10));
+    assertEquals(DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY, outs.get(11));
     assertEquals(errs.size(), 0);
   }
 


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org