Posted to common-commits@hadoop.apache.org by ar...@apache.org on 2018/08/20 22:07:34 UTC

[01/14] hadoop git commit: HADOOP-14212. Expose SecurityEnabled boolean field in JMX for other services besides NameNode. Contributed by Adam Antal.

Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 44c4928b6 -> 3712b79b3


HADOOP-14212. Expose SecurityEnabled boolean field in JMX for other services besides NameNode. Contributed by Adam Antal.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/78fb14ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/78fb14ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/78fb14ba

Branch: refs/heads/branch-3.1
Commit: 78fb14ba4908cc9222bc5c1239f63f7c203564a0
Parents: 44c4928
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Tue Aug 14 17:19:00 2018 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Mon Aug 20 14:49:24 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/server/datanode/DataNode.java   |  5 ++
 .../hdfs/server/datanode/DataNodeMXBean.java    |  7 +++
 .../hdfs/server/namenode/SecondaryNameNode.java |  5 ++
 .../namenode/SecondaryNameNodeInfoMXBean.java   |  7 +++
 .../server/datanode/TestDataNodeMXBean.java     | 47 +++++++++++++++++-
 .../server/namenode/TestSecureNameNode.java     | 52 +++++++++++++++++++-
 .../yarn/server/nodemanager/NodeManager.java    | 20 +++++++-
 .../server/resourcemanager/ResourceManager.java | 18 ++++++-
 8 files changed, 156 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
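
A minimal in-process sketch of reading the new attribute, mirroring what the tests below do (the ObjectName comes from this patch; the surrounding class and main method are illustrative):

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    // Minimal sketch: read the new SecurityEnabled attribute from the
    // platform MBeanServer of a JVM that hosts a DataNode (for example
    // inside a MiniDFSCluster test, as below). The ObjectName is the
    // one the patch uses.
    public class SecurityEnabledProbe {
      public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName name =
            new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
        boolean securityEnabled =
            (boolean) mbs.getAttribute(name, "SecurityEnabled");
        System.out.println("SecurityEnabled = " + securityEnabled);
      }
    }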


http://git-wip-us.apache.org/repos/asf/hadoop/blob/78fb14ba/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 1e9c57a..4823358 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -3142,6 +3142,11 @@ public class DataNode extends ReconfigurableBase
     }
   }
 
+  @Override
+  public boolean isSecurityEnabled() {
+    return UserGroupInformation.isSecurityEnabled();
+  }
+
   public void refreshNamenodes(Configuration conf) throws IOException {
     blockPoolManager.refreshNamenodes(conf);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78fb14ba/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
index b5f0cd0..9d11e14 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
@@ -146,4 +146,11 @@ public interface DataNodeMXBean {
    * @return list of slow disks
    */
   String getSlowDisks();
+
+  /**
+   * Gets if security is enabled.
+   *
+   * @return true, if security is enabled.
+   */
+  boolean isSecurityEnabled();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78fb14ba/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index ff83e34..4d7b747 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -722,6 +722,11 @@ public class SecondaryNameNode implements Runnable,
     return NetUtils.getHostPortString(nameNodeAddr);
   }
 
+  @Override
+  public boolean isSecurityEnabled() {
+    return UserGroupInformation.isSecurityEnabled();
+  }
+
   @Override // SecondaryNameNodeInfoMXBean
   public long getStartTime() {
     return starttime;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78fb14ba/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNodeInfoMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNodeInfoMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNodeInfoMXBean.java
index 785c5ee..a042dc2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNodeInfoMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNodeInfoMXBean.java
@@ -32,6 +32,13 @@ public interface SecondaryNameNodeInfoMXBean extends VersionInfoMXBean {
   public String getHostAndPort();
 
   /**
+   * Gets if security is enabled.
+   *
+   * @return true, if security is enabled.
+   */
+  boolean isSecurityEnabled();
+
+  /**
    * @return the timestamp of when the SNN starts
    */
   public long getStartTime();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78fb14ba/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
index 9107aae..3546ad8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
@@ -38,6 +38,8 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferTestCase;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Test;
@@ -49,7 +51,7 @@ import static org.junit.Assert.assertTrue;
 /**
  * Class for testing {@link DataNodeMXBean} implementation
  */
-public class TestDataNodeMXBean {
+public class TestDataNodeMXBean extends SaslDataTransferTestCase {
 
   public static final Log LOG = LogFactory.getLog(TestDataNodeMXBean.class);
 
@@ -117,6 +119,49 @@ public class TestDataNodeMXBean {
       }
     }
   }
+
+  @Test
+  public void testDataNodeMXBeanSecurityEnabled() throws Exception {
+    Configuration simpleConf = new Configuration();
+    Configuration secureConf = createSecureConfig("authentication");
+
+    // get attribute "SecurityEnabled" with simple configuration
+    try (MiniDFSCluster cluster =
+                 new MiniDFSCluster.Builder(simpleConf).build()) {
+      List<DataNode> datanodes = cluster.getDataNodes();
+      Assert.assertEquals(datanodes.size(), 1);
+      DataNode datanode = datanodes.get(0);
+
+      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+      ObjectName mxbeanName = new ObjectName(
+              "Hadoop:service=DataNode,name=DataNodeInfo");
+
+      boolean securityEnabled = (boolean) mbs.getAttribute(mxbeanName,
+              "SecurityEnabled");
+      Assert.assertFalse(securityEnabled);
+      Assert.assertEquals(datanode.isSecurityEnabled(), securityEnabled);
+    }
+
+    // get attribute "SecurityEnabled" with secure configuration
+    try (MiniDFSCluster cluster =
+                 new MiniDFSCluster.Builder(secureConf).build()) {
+      List<DataNode> datanodes = cluster.getDataNodes();
+      Assert.assertEquals(datanodes.size(), 1);
+      DataNode datanode = datanodes.get(0);
+
+      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+      ObjectName mxbeanName = new ObjectName(
+              "Hadoop:service=DataNode,name=DataNodeInfo");
+
+      boolean securityEnabled = (boolean) mbs.getAttribute(mxbeanName,
+              "SecurityEnabled");
+      Assert.assertTrue(securityEnabled);
+      Assert.assertEquals(datanode.isSecurityEnabled(), securityEnabled);
+    }
+
+    // setting back the authentication method
+    UserGroupInformation.setConfiguration(simpleConf);
+  }
   
   private static String replaceDigits(final String s) {
     return s.replaceAll("[0-9]+", "_DIGITS_");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78fb14ba/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNode.java
index 6b6ce53..c90a91c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNode.java
@@ -22,8 +22,10 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
+import java.lang.management.ManagementFactory;
 import java.security.PrivilegedExceptionAction;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -33,10 +35,12 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferTestCase;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
+import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
-
+import javax.management.MBeanServer;
+import javax.management.ObjectName;
 
 public class TestSecureNameNode extends SaslDataTransferTestCase {
   final static private int NUM_OF_DATANODES = 0;
@@ -117,4 +121,50 @@ public class TestSecureNameNode extends SaslDataTransferTestCase {
     return;
   }
 
+  /**
+   * Test NameNodeStatusMXBean with security enabled and disabled.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testNameNodeStatusMXBeanSecurityEnabled() throws Exception {
+    Configuration simpleConf = new Configuration();
+    Configuration secureConf = createSecureConfig("authentication");
+
+    // disabling security
+    UserGroupInformation.setConfiguration(simpleConf);
+
+    // get attribute "SecurityEnabled" with simple configuration
+    try (MiniDFSCluster cluster =
+                 new MiniDFSCluster.Builder(simpleConf).build()) {
+      cluster.waitActive();
+      NameNode namenode = cluster.getNameNode();
+
+      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+      ObjectName mxbeanName = new ObjectName(
+              "Hadoop:service=NameNode,name=NameNodeStatus");
+
+      boolean securityEnabled = (boolean) mbs.getAttribute(mxbeanName,
+              "SecurityEnabled");
+      Assert.assertFalse(securityEnabled);
+      Assert.assertEquals(namenode.isSecurityEnabled(), securityEnabled);
+    }
+
+    // get attribute "SecurityEnabled" with secure configuration
+    try (MiniDFSCluster cluster =
+                 new MiniDFSCluster.Builder(secureConf).build()) {
+      cluster.waitActive();
+      NameNode namenode = cluster.getNameNode();
+
+      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+      ObjectName mxbeanName = new ObjectName(
+              "Hadoop:service=NameNode,name=NameNodeStatus");
+
+      boolean securityEnabled = (boolean) mbs.getAttribute(mxbeanName,
+              "SecurityEnabled");
+      Assert.assertTrue(securityEnabled);
+      Assert.assertEquals(namenode.isSecurityEnabled(), securityEnabled);
+    }
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78fb14ba/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
index c8234bd..b54a6b7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
@@ -25,8 +25,10 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.GenericOptionsParser;
@@ -87,8 +89,8 @@ import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-public class NodeManager extends CompositeService 
-    implements EventHandler<NodeManagerEvent> {
+public class NodeManager extends CompositeService
+    implements EventHandler<NodeManagerEvent>, NodeManagerMXBean {
 
   /**
    * Node manager return status codes.
@@ -470,6 +472,8 @@ public class NodeManager extends CompositeService
       throw new YarnRuntimeException("Failed NodeManager login", e);
     }
 
+    registerMXBean();
+
     super.serviceInit(conf);
     // TODO add local dirs to del
   }
@@ -947,6 +951,18 @@ public class NodeManager extends CompositeService
       LOG.warn("Invalid shutdown event " + event.getType() + ". Ignoring.");
     }
   }
+
+  /**
+   * Register NodeManagerMXBean.
+   */
+  private void registerMXBean() {
+    MBeans.register("NodeManager", "NodeManager", this);
+  }
+
+  @Override
+  public boolean isSecurityEnabled() {
+    return UserGroupInformation.isSecurityEnabled();
+  }
   
   // For testing
   NodeManager createNewNodeManager() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78fb14ba/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index bb85b67..d459f0e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
+import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -140,7 +141,8 @@ import java.util.concurrent.atomic.AtomicBoolean;
  *
  */
 @SuppressWarnings("unchecked")
-public class ResourceManager extends CompositeService implements Recoverable {
+public class ResourceManager extends CompositeService
+        implements Recoverable, ResourceManagerMXBean {
 
   /**
    * Priority of the ResourceManager shutdown hook.
@@ -337,6 +339,8 @@ public class ResourceManager extends CompositeService implements Recoverable {
     addIfService(systemMetricsPublisher);
     rmContext.setSystemMetricsPublisher(systemMetricsPublisher);
 
+    registerMXBean();
+
     super.serviceInit(this.conf);
   }
 
@@ -1600,4 +1604,16 @@ public class ResourceManager extends CompositeService implements Recoverable {
   protected RMAppLifetimeMonitor createRMAppLifetimeMonitor() {
     return new RMAppLifetimeMonitor(this.rmContext);
   }
+
+  /**
+   * Register ResourceManagerMXBean.
+   */
+  private void registerMXBean() {
+    MBeans.register("ResourceManager", "ResourceManager", this);
+  }
+
+  @Override
+  public boolean isSecurityEnabled() {
+    return UserGroupInformation.isSecurityEnabled();
+  }
 }




[06/14] hadoop git commit: HDFS-13447. Fix Typos - Node Not Chosen. Contributed by Beluga Behr.

Posted by ar...@apache.org.
HDFS-13447. Fix Typos - Node Not Chosen. Contributed by Beluga Behr.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/366517ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/366517ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/366517ba

Branch: refs/heads/branch-3.1
Commit: 366517ba994c9a075d220a03ae6b829e66449b55
Parents: a517ee4
Author: Márton Elek <el...@apache.org>
Authored: Wed Aug 8 17:27:57 2018 +0200
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Mon Aug 20 14:52:04 2018 -0700

----------------------------------------------------------------------
 .../hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/366517ba/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index e2a9c55..ac20e6e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -67,11 +67,11 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
       .withInitial(() -> new HashMap<NodeNotChosenReason, Integer>());
 
   private enum NodeNotChosenReason {
-    NOT_IN_SERVICE("the node isn't in service"),
+    NOT_IN_SERVICE("the node is not in service"),
     NODE_STALE("the node is stale"),
     NODE_TOO_BUSY("the node is too busy"),
     TOO_MANY_NODES_ON_RACK("the rack has too many chosen nodes"),
-    NOT_ENOUGH_STORAGE_SPACE("no enough storage space to place the block");
+    NOT_ENOUGH_STORAGE_SPACE("not enough storage space to place the block");
 
     private final String text;
 




[04/14] hadoop git commit: HDFS-13799. TestEditLogTailer#testTriggersLogRollsForAllStandbyNN fails due to missing synchronization between rollEditsRpcExecutor and tailerThread shutdown. Contributed by Hrishikesh Gadre.

Posted by ar...@apache.org.
HDFS-13799. TestEditLogTailer#testTriggersLogRollsForAllStandbyNN fails due to missing synchronization between rollEditsRpcExecutor and tailerThread shutdown. Contributed by Hrishikesh Gadre.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/75406990
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/75406990
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/75406990

Branch: refs/heads/branch-3.1
Commit: 754069906bda1d961bce9f584d0457ee10db6762
Parents: e4b75ad
Author: Xiao Chen <xi...@apache.org>
Authored: Tue Aug 7 16:11:37 2018 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Mon Aug 20 14:51:07 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java  | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/75406990/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
index 2003f94..b306b8d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
@@ -234,7 +234,6 @@ public class EditLogTailer {
   }
   
   public void stop() throws IOException {
-    rollEditsRpcExecutor.shutdown();
     tailerThread.setShouldRun(false);
     tailerThread.interrupt();
     try {
@@ -242,6 +241,8 @@ public class EditLogTailer {
     } catch (InterruptedException e) {
       LOG.warn("Edit log tailer thread exited with an exception");
       throw new IOException(e);
+    } finally {
+      rollEditsRpcExecutor.shutdown();
     }
   }
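
The ordering matters because the tailer thread can still be submitting roll-edits calls to rollEditsRpcExecutor while it is being stopped; shutting the executor down only after the thread has exited, and in a finally block so it happens even if the join throws, closes that window. A self-contained sketch of the same pattern, with illustrative names rather than Hadoop's:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    // Sketch of the shutdown ordering the patch establishes: stop the
    // submitting thread first, then retire the executor it submits to,
    // so no submit() can race a shut-down executor.
    public class ShutdownOrderSketch {
      public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Thread submitter = new Thread(() -> {
          while (!Thread.currentThread().isInterrupted()) {
            executor.submit(() -> { /* stands in for a roll-edits RPC */ });
          }
        });
        submitter.start();
        submitter.interrupt();
        submitter.join();                    // stop the submitter first
        executor.shutdown();                 // then retire the executor
        executor.awaitTermination(1, TimeUnit.SECONDS);
      }
    }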
   




[08/14] hadoop git commit: HDFS-13668. FSPermissionChecker may throw AIOOE when checking inode permission. Contributed by He Xiaoqiao.

Posted by ar...@apache.org.
HDFS-13668. FSPermissionChecker may throw AIOOE when checking inode permission. Contributed by He Xiaoqiao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/975d6068
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/975d6068
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/975d6068

Branch: refs/heads/branch-3.1
Commit: 975d60685eaf9961bdbd3547600b3e38bb088835
Parents: c0ac0a5
Author: drankye <zh...@alibaba-inc.com>
Authored: Mon Aug 13 17:32:56 2018 +0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Mon Aug 20 14:53:32 2018 -0700

----------------------------------------------------------------------
 .../server/namenode/FSPermissionChecker.java    |  2 +-
 .../namenode/TestINodeAttributeProvider.java    | 43 ++++++++++++++++++--
 2 files changed, 41 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/975d6068/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
index 354b4e3..f70963c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
@@ -409,7 +409,7 @@ public class FSPermissionChecker implements AccessControlEnforcer {
     }
     final FsPermission mode = inode.getFsPermission();
     final AclFeature aclFeature = inode.getAclFeature();
-    if (aclFeature != null) {
+    if (aclFeature != null && aclFeature.getEntriesSize() > 0) {
       // It's possible that the inode has a default ACL but no access ACL.
       int firstEntry = aclFeature.getEntryAt(0);
       if (AclEntryStatusFormat.getScope(firstEntry) == AclEntryScope.ACCESS) {
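
The extra getEntriesSize() check matters because an INodeAttributeProvider can hand back an AclFeature with zero entries, which is exactly what the new test below constructs; without the check, getEntryAt(0) throws the ArrayIndexOutOfBoundsException from the summary. A hedged fragment of the failure mode (AclFeature and its int[] constructor are only visible inside org.apache.hadoop.hdfs.server.namenode, so this is a sketch, not standalone code):

    // An AclFeature backed by an empty entry array reports
    // getEntriesSize() == 0, so reading entry 0 would throw AIOOE.
    // The added size guard skips empty ACLs entirely.
    AclFeature empty = new AclFeature(new int[0]);
    if (empty.getEntriesSize() > 0) {
      int firstEntry = empty.getEntryAt(0);   // unreachable for empty ACLs
    }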

http://git-wip-us.apache.org/repos/asf/hadoop/blob/975d6068/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
index 9c7dcd3..b3bab06 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
@@ -57,6 +57,11 @@ public class TestINodeAttributeProvider {
   public static class MyAuthorizationProvider extends INodeAttributeProvider {
 
     public static class MyAccessControlEnforcer implements AccessControlEnforcer {
+      AccessControlEnforcer ace;
+
+      public MyAccessControlEnforcer(AccessControlEnforcer defaultEnforcer) {
+        this.ace = defaultEnforcer;
+      }
 
       @Override
       public void checkPermission(String fsOwner, String supergroup,
@@ -65,6 +70,13 @@ public class TestINodeAttributeProvider {
           int ancestorIndex, boolean doCheckOwner, FsAction ancestorAccess,
           FsAction parentAccess, FsAction access, FsAction subAccess,
           boolean ignoreEmptyDir) throws AccessControlException {
+        if (ancestorIndex > 1
+            && inodes[1].getLocalName().equals("user")
+            && inodes[2].getLocalName().equals("acl")) {
+          this.ace.checkPermission(fsOwner, supergroup, ugi, inodeAttrs, inodes,
+              pathByNameArr, snapshotId, path, ancestorIndex, doCheckOwner,
+              ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir);
+        }
         CALLED.add("checkPermission|" + ancestorAccess + "|" + parentAccess + "|" + access);
       }
     }
@@ -84,6 +96,7 @@ public class TestINodeAttributeProvider {
         final INodeAttributes inode) {
       CALLED.add("getAttributes");
       final boolean useDefault = useDefault(pathElements);
+      final boolean useNullAcl = useNullAclFeature(pathElements);
       return new INodeAttributes() {
         @Override
         public boolean isDirectory() {
@@ -126,7 +139,10 @@ public class TestINodeAttributeProvider {
         @Override
         public AclFeature getAclFeature() {
           AclFeature f;
-          if (useDefault) {
+          if (useNullAcl) {
+            int[] entries = new int[0];
+            f = new AclFeature(entries);
+          } else if (useDefault) {
             f = inode.getAclFeature();
           } else {
             AclEntry acl = new AclEntry.Builder().setType(AclEntryType.GROUP).
@@ -167,8 +183,8 @@ public class TestINodeAttributeProvider {
 
     @Override
     public AccessControlEnforcer getExternalAccessControlEnforcer(
-        AccessControlEnforcer deafultEnforcer) {
-      return new MyAccessControlEnforcer();
+        AccessControlEnforcer defaultEnforcer) {
+      return new MyAccessControlEnforcer(defaultEnforcer);
     }
 
     private boolean useDefault(String[] pathElements) {
@@ -176,6 +192,11 @@ public class TestINodeAttributeProvider {
           !(pathElements[0].equals("user") && pathElements[1].equals("authz"));
     }
 
+    private boolean useNullAclFeature(String[] pathElements) {
+      return (pathElements.length > 2)
+          && pathElements[1].equals("user")
+          && pathElements[2].equals("acl");
+    }
   }
 
   @Before
@@ -368,4 +389,20 @@ public class TestINodeAttributeProvider {
       });
     }
   }
+
+  @Test
+  public void testAclFeature() throws Exception {
+    UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
+            "testuser", new String[]{"testgroup"});
+    ugi.doAs((PrivilegedExceptionAction<Object>) () -> {
+      FileSystem fs = miniDFS.getFileSystem();
+      Path aclDir = new Path("/user/acl");
+      fs.mkdirs(aclDir);
+      Path aclChildDir = new Path(aclDir, "subdir");
+      fs.mkdirs(aclChildDir);
+      AclStatus aclStatus = fs.getAclStatus(aclDir);
+      Assert.assertEquals(0, aclStatus.getEntries().size());
+      return null;
+    });
+  }
 }




[02/14] hadoop git commit: HADOOP-14212. Addendum patch: Expose SecurityEnabled boolean field in JMX for other services besides NameNode. Contributed by Adam Antal.

Posted by ar...@apache.org.
HADOOP-14212. Addendum patch: Expose SecurityEnabled boolean field in JMX for other services besides NameNode. Contributed by Adam Antal.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0d155de1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0d155de1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0d155de1

Branch: refs/heads/branch-3.1
Commit: 0d155de1591781ab374803ad79c12cf1e91f1692
Parents: 78fb14b
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Tue Aug 14 18:24:32 2018 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Mon Aug 20 14:49:28 2018 -0700

----------------------------------------------------------------------
 .../server/nodemanager/NodeManagerMXBean.java   | 38 +++++++++++++
 .../nodemanager/TestNodeManagerMXBean.java      | 56 ++++++++++++++++++++
 .../resourcemanager/ResourceManagerMXBean.java  | 38 +++++++++++++
 .../TestResourceManagerMXBean.java              | 56 ++++++++++++++++++++
 4 files changed, 188 insertions(+)
----------------------------------------------------------------------
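
The matching MBeans.register() calls live in the parent commit (78fb14ba above); register(service, name, bean) publishes the bean on the platform MBeanServer under "Hadoop:service=<service>,name=<name>", which is why the tests below look up "Hadoop:service=NodeManager,name=NodeManager" and "Hadoop:service=ResourceManager,name=ResourceManager". A hedged sketch of the pattern (the class here is illustrative, not the real NodeManager):

    import org.apache.hadoop.metrics2.util.MBeans;
    import org.apache.hadoop.yarn.server.nodemanager.NodeManagerMXBean;

    // Sketch of the registration NodeManager.serviceInit() now performs.
    public class RegisterSketch implements NodeManagerMXBean {
      @Override
      public boolean isSecurityEnabled() {
        return false;   // stand-in value for the sketch
      }

      public static void main(String[] args) {
        // Appears in JMX as Hadoop:service=NodeManager,name=NodeManager.
        MBeans.register("NodeManager", "NodeManager", new RegisterSketch());
      }
    }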


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d155de1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManagerMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManagerMXBean.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManagerMXBean.java
new file mode 100644
index 0000000..b4ab0aa
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManagerMXBean.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * This is the JMX management interface for NodeManager.
+ * End users shouldn't be implementing these interfaces, and instead
+ * access this information through the JMX APIs.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface NodeManagerMXBean {
+  /**
+   * Gets if security is enabled.
+   *
+   * @return true, if security is enabled.
+   * */
+  boolean isSecurityEnabled();
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d155de1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerMXBean.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerMXBean.java
new file mode 100644
index 0000000..80b915c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerMXBean.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.junit.Assert;
+import org.junit.Test;
+import javax.management.MBeanServer;
+import javax.management.ObjectName;
+import java.lang.management.ManagementFactory;
+
+/**
+ * Class for testing {@link NodeManagerMXBean} implementation.
+ */
+public class TestNodeManagerMXBean {
+  public static final Log LOG = LogFactory.getLog(
+          TestNodeManagerMXBean.class);
+
+  @Test
+  public void testNodeManagerMXBean() throws Exception {
+    try (NodeManager nodeManager = new NodeManager()) {
+      Configuration conf = new YarnConfiguration();
+      UserGroupInformation.setConfiguration(conf);
+      nodeManager.init(conf);
+
+      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+      ObjectName mxbeanName = new ObjectName(
+              "Hadoop:service=NodeManager,name=NodeManager");
+
+      // Get attribute "SecurityEnabled"
+      boolean securityEnabled = (boolean) mbs.getAttribute(mxbeanName,
+              "SecurityEnabled");
+      Assert.assertEquals(nodeManager.isSecurityEnabled(), securityEnabled);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d155de1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManagerMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManagerMXBean.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManagerMXBean.java
new file mode 100644
index 0000000..7a3c077
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManagerMXBean.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * This is the JMX management interface for ResourceManager.
+ * End users shouldn't be implementing these interfaces, and instead
+ * access this information through the JMX APIs.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface ResourceManagerMXBean {
+  /**
+   * Gets if security is enabled.
+   *
+   * @return true, if security is enabled.
+   * */
+  boolean isSecurityEnabled();
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d155de1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManagerMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManagerMXBean.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManagerMXBean.java
new file mode 100644
index 0000000..7b6fa78
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManagerMXBean.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.junit.Assert;
+import org.junit.Test;
+
+import javax.management.MBeanServer;
+import javax.management.ObjectName;
+import java.lang.management.ManagementFactory;
+
+/**
+ * Class for testing {@link ResourceManagerMXBean} implementation.
+ */
+public class TestResourceManagerMXBean {
+  public static final Log LOG = LogFactory.getLog(
+          TestResourceManagerMXBean.class);
+
+  @Test
+  public void testResourceManagerMXBean() throws Exception {
+    try (ResourceManager resourceManager = new ResourceManager()) {
+      Configuration conf = new YarnConfiguration();
+      UserGroupInformation.setConfiguration(conf);
+      resourceManager.init(conf);
+
+      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+      ObjectName mxbeanName = new ObjectName(
+              "Hadoop:service=ResourceManager,name=ResourceManager");
+
+      // Get attribute "SecurityEnabled"
+      boolean securityEnabled = (boolean) mbs.getAttribute(mxbeanName,
+              "SecurityEnabled");
+      Assert.assertEquals(resourceManager.isSecurityEnabled(), securityEnabled);
+    }
+  }
+}




[14/14] hadoop git commit: HADOOP-9214. Create a new touch command to allow modifying atime and mtime. Contributed by Hrishikesh Gadre.

Posted by ar...@apache.org.
HADOOP-9214. Create a new touch command to allow modifying atime and mtime. Contributed by Hrishikesh Gadre.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3712b79b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3712b79b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3712b79b

Branch: refs/heads/branch-3.1
Commit: 3712b79b38754f5e1710d29ffc3bb3576bacf02e
Parents: a630a27
Author: Xiao Chen <xi...@apache.org>
Authored: Fri Aug 17 10:53:22 2018 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Mon Aug 20 14:59:14 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/fs/shell/FsCommand.java   |   2 +-
 .../java/org/apache/hadoop/fs/shell/Touch.java  |  85 --------
 .../apache/hadoop/fs/shell/TouchCommands.java   | 198 +++++++++++++++++++
 .../src/site/markdown/FileSystemShell.md        |  32 +++
 .../org/apache/hadoop/fs/TestFsShellTouch.java  | 103 ++++++++++
 .../src/test/resources/testConf.xml             |  51 +++++
 6 files changed, 385 insertions(+), 86 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3712b79b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
index 4a13414..784bbf3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
@@ -66,7 +66,7 @@ abstract public class FsCommand extends Command {
     factory.registerCommands(Tail.class);
     factory.registerCommands(Head.class);
     factory.registerCommands(Test.class);
-    factory.registerCommands(Touch.class);
+    factory.registerCommands(TouchCommands.class);
     factory.registerCommands(Truncate.class);
     factory.registerCommands(SnapshotCommands.class);
     factory.registerCommands(XAttrCommands.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3712b79b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touch.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touch.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touch.java
deleted file mode 100644
index a6c751e..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touch.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.shell;
-
-import java.io.IOException;
-import java.util.LinkedList;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.PathIOException;
-import org.apache.hadoop.fs.PathIsDirectoryException;
-import org.apache.hadoop.fs.PathNotFoundException;
-
-/**
- * Unix touch like commands
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-
-class Touch extends FsCommand {
-  public static void registerCommands(CommandFactory factory) {
-    factory.addClass(Touchz.class, "-touchz");
-  }
-
-  /**
-   * (Re)create zero-length file at the specified path.
-   * This will be replaced by a more UNIX-like touch when files may be
-   * modified.
-   */
-  public static class Touchz extends Touch {
-    public static final String NAME = "touchz";
-    public static final String USAGE = "<path> ...";
-    public static final String DESCRIPTION =
-      "Creates a file of zero length " +
-      "at <path> with current time as the timestamp of that <path>. " +
-      "An error is returned if the file exists with non-zero length\n";
-
-    @Override
-    protected void processOptions(LinkedList<String> args) {
-      CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE);
-      cf.parse(args);
-    }
-
-    @Override
-    protected void processPath(PathData item) throws IOException {
-      if (item.stat.isDirectory()) {
-        // TODO: handle this
-        throw new PathIsDirectoryException(item.toString());
-      }
-      if (item.stat.getLen() != 0) {
-        throw new PathIOException(item.toString(), "Not a zero-length file");
-      }
-      touchz(item);
-    }
-
-    @Override
-    protected void processNonexistentPath(PathData item) throws IOException {
-      if (!item.parentExists()) {
-        throw new PathNotFoundException(item.toString())
-            .withFullyQualifiedPath(item.path.toUri().toString());
-      }
-      touchz(item);
-    }
-
-    private void touchz(PathData item) throws IOException {
-      item.fs.create(item.path).close();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3712b79b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/TouchCommands.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/TouchCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/TouchCommands.java
new file mode 100644
index 0000000..be174b5
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/TouchCommands.java
@@ -0,0 +1,198 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.shell;
+
+import java.io.IOException;
+import java.text.DateFormat;
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+import java.util.LinkedList;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.PathIOException;
+import org.apache.hadoop.fs.PathIsDirectoryException;
+import org.apache.hadoop.fs.PathNotFoundException;
+import org.apache.hadoop.util.StringUtils;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Unix touch like commands
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+
+public class TouchCommands extends FsCommand {
+  public static void registerCommands(CommandFactory factory) {
+    factory.addClass(Touchz.class, "-touchz");
+    factory.addClass(Touch.class, "-touch");
+  }
+
+  /**
+   * (Re)create zero-length file at the specified path.
+   * This will be replaced by a more UNIX-like touch when files may be
+   * modified.
+   */
+  public static class Touchz extends TouchCommands {
+    public static final String NAME = "touchz";
+    public static final String USAGE = "<path> ...";
+    public static final String DESCRIPTION =
+      "Creates a file of zero length " +
+      "at <path> with current time as the timestamp of that <path>. " +
+      "An error is returned if the file exists with non-zero length\n";
+
+    @Override
+    protected void processOptions(LinkedList<String> args) {
+      CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE);
+      cf.parse(args);
+    }
+
+    @Override
+    protected void processPath(PathData item) throws IOException {
+      if (item.stat.isDirectory()) {
+        // TODO: handle this
+        throw new PathIsDirectoryException(item.toString());
+      }
+      if (item.stat.getLen() != 0) {
+        throw new PathIOException(item.toString(), "Not a zero-length file");
+      }
+      touchz(item);
+    }
+
+    @Override
+    protected void processNonexistentPath(PathData item) throws IOException {
+      if (!item.parentExists()) {
+        throw new PathNotFoundException(item.toString())
+            .withFullyQualifiedPath(item.path.toUri().toString());
+      }
+      touchz(item);
+    }
+
+    private void touchz(PathData item) throws IOException {
+      item.fs.create(item.path).close();
+    }
+  }
+
+  /**
+   * A UNIX like touch command.
+   */
+  public static class Touch extends TouchCommands {
+    private static final String OPTION_CHANGE_ONLY_MODIFICATION_TIME = "m";
+    private static final String OPTION_CHANGE_ONLY_ACCESS_TIME = "a";
+    private static final String OPTION_USE_TIMESTAMP = "t";
+    private static final String OPTION_DO_NOT_CREATE_FILE = "c";
+
+    public static final String NAME = "touch";
+    public static final String USAGE = "[-" + OPTION_CHANGE_ONLY_ACCESS_TIME
+        + "] [-" + OPTION_CHANGE_ONLY_MODIFICATION_TIME + "] [-"
+        + OPTION_USE_TIMESTAMP + " TIMESTAMP ] [-" + OPTION_DO_NOT_CREATE_FILE
+        + "] <path> ...";
+    public static final String DESCRIPTION =
+        "Updates the access and modification times of the file specified by the"
+            + " <path> to the current time. If the file does not exist, then a zero"
+            + " length file is created at <path> with current time as the timestamp"
+            + " of that <path>.\n"
+            + "-" + OPTION_CHANGE_ONLY_ACCESS_TIME
+            + " Change only the access time \n" + "-"
+            + OPTION_CHANGE_ONLY_MODIFICATION_TIME
+            + " Change only the modification time \n" + "-"
+            + OPTION_USE_TIMESTAMP + " TIMESTAMP"
+            + " Use specified timestamp (in format yyyyMMddHHmmss) instead of current time \n"
+            + "-" + OPTION_DO_NOT_CREATE_FILE + " Do not create any files";
+
+    private boolean changeModTime = false;
+    private boolean changeAccessTime = false;
+    private boolean doNotCreate = false;
+    private String timestamp;
+    private final SimpleDateFormat dateFormat =
+        new SimpleDateFormat("yyyyMMdd:HHmmss");
+
+    @InterfaceAudience.Private
+    @VisibleForTesting
+    public DateFormat getDateFormat() {
+      return dateFormat;
+    }
+
+    @Override
+    protected void processOptions(LinkedList<String> args) {
+      this.timestamp =
+          StringUtils.popOptionWithArgument("-" + OPTION_USE_TIMESTAMP, args);
+
+      CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE,
+          OPTION_USE_TIMESTAMP, OPTION_CHANGE_ONLY_ACCESS_TIME,
+          OPTION_CHANGE_ONLY_MODIFICATION_TIME);
+      cf.parse(args);
+      this.changeModTime = cf.getOpt(OPTION_CHANGE_ONLY_MODIFICATION_TIME);
+      this.changeAccessTime = cf.getOpt(OPTION_CHANGE_ONLY_ACCESS_TIME);
+      this.doNotCreate = cf.getOpt(OPTION_DO_NOT_CREATE_FILE);
+    }
+
+    @Override
+    protected void processPath(PathData item) throws IOException {
+      if (item.stat.isDirectory()) {
+        throw new PathIsDirectoryException(item.toString());
+      }
+      touch(item);
+    }
+
+    @Override
+    protected void processNonexistentPath(PathData item) throws IOException {
+      if (!item.parentExists()) {
+        throw new PathNotFoundException(item.toString())
+            .withFullyQualifiedPath(item.path.toUri().toString());
+      }
+      touch(item);
+    }
+
+    private void touch(PathData item) throws IOException {
+      if (!item.fs.exists(item.path)) {
+        if (doNotCreate) {
+          return;
+        }
+        item.fs.create(item.path).close();
+        if (timestamp != null) {
+          // update the time only if user specified a timestamp using -t option.
+          updateTime(item);
+        }
+      } else {
+        updateTime(item);
+      }
+    }
+
+    private void updateTime(PathData item) throws IOException {
+      long time = System.currentTimeMillis();
+      if (timestamp != null) {
+        try {
+          time = dateFormat.parse(timestamp).getTime();
+        } catch (ParseException e) {
+          throw new IllegalArgumentException(
+              "Unable to parse the specified timestamp " + timestamp, e);
+        }
+      }
+      if (changeModTime ^ changeAccessTime) {
+        long atime = changeModTime ? -1 : time;
+        long mtime = changeAccessTime ? -1 : time;
+        item.fs.setTimes(item.path, mtime, atime);
+      } else {
+        item.fs.setTimes(item.path, time, time);
+      }
+    }
+  }
+}
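
Worth noting about the updateTime() XOR branch above: FileSystem.setTimes(path, mtime, atime) treats -1 as "leave this timestamp unchanged", which is what lets -a and -m each touch a single field. A minimal sketch of the mapping (fs, path and t are placeholders):

    fs.setTimes(path, -1L, t);  // -a only: set access time, keep modification time
    fs.setTimes(path, t, -1L);  // -m only: set modification time, keep access time
    fs.setTimes(path, t, t);    // neither or both flags: set both to t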

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3712b79b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
index ec9d3c3..d9567b9 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
@@ -741,6 +741,38 @@ Usage: `hadoop fs -text <src> `
 
 Takes a source file and outputs the file in text format. The allowed formats are zip and TextRecordInputStream.
 
+touch
+------
+
+Usage: `hadoop fs -touch [-a] [-m] [-t TIMESTAMP] [-c] URI [URI ...]`
+
+Updates the access and modification times of the file specified by the URI to the current time.
+If the file does not exist, then a zero length file is created at URI with current time as the
+timestamp of that URI.
+
+* Use -a option to change only the access time
+* Use -m option to change only the modification time
+* Use -t option to specify timestamp (in format yyyyMMddHHmmss) instead of current time
+* Use -c option to not create file if it does not exist
+
+The timestamp format is as follows:
+
+* yyyy Four digit year (e.g. 2018)
+* MM Two digit month of the year (e.g. 08 for the month of August)
+* dd Two digit day of the month (e.g. 01 for the first day of the month)
+* HH Two digit hour of the day using 24 hour notation (e.g. 23 stands for 11 pm, 11 stands for 11 am)
+* mm Two digit minutes of the hour
+* ss Two digit seconds of the minute
+
+e.g. 20180809230000 represents August 9th 2018, 11pm
+
+Example:
+
+* `hadoop fs -touch pathname`
+* `hadoop fs -touch -m -t 20180809230000 pathname`
+* `hadoop fs -touch -t 20180809230000 pathname`
+* `hadoop fs -touch -a pathname`
+
+Exit Code: Returns 0 on success and -1 on error.
+
 touchz
 ------
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3712b79b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellTouch.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellTouch.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellTouch.java
index 5fe4e39..2e7cb5d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellTouch.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellTouch.java
@@ -21,7 +21,11 @@ import static org.hamcrest.CoreMatchers.is;
 import static org.hamcrest.CoreMatchers.not;
 import static org.junit.Assert.assertThat;
 
+import java.text.ParseException;
+import java.util.Date;
+
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.shell.TouchCommands.Touch;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Before;
@@ -85,4 +89,103 @@ public class TestFsShellTouch {
     assertThat("Expected failed touchz in a non-existent directory",
         shellRun("-touchz", noDirName + "/foo"), is(not(0)));
   }
+
+  @Test
+  public void testTouch() throws Exception {
+    // Ensure newFile2 does not exist
+    final String newFileName = "newFile2";
+    final Path newFile = new Path(newFileName);
+    lfs.delete(newFile, true);
+    assertThat(lfs.exists(newFile), is(false));
+
+    {
+      assertThat(
+          "Expected successful touch on a non-existent file with -c option",
+          shellRun("-touch", "-c", newFileName), is(0));
+      assertThat(lfs.exists(newFile), is(false));
+    }
+
+    {
+      String strTime = formatTimestamp(System.currentTimeMillis());
+      Date dateObj = parseTimestamp(strTime);
+
+      assertThat(
+          "Expected successful touch on a new file with a specified timestamp",
+          shellRun("-touch", "-t", strTime, newFileName), is(0));
+      FileStatus newStatus = lfs.getFileStatus(newFile);
+      assertThat(newStatus.getAccessTime(), is(dateObj.getTime()));
+      assertThat(newStatus.getModificationTime(), is(dateObj.getTime()));
+    }
+
+    FileStatus fstatus = lfs.getFileStatus(newFile);
+
+    {
+      String strTime = formatTimestamp(System.currentTimeMillis());
+      Date dateObj = parseTimestamp(strTime);
+
+      assertThat("Expected successful touch with a specified access time",
+          shellRun("-touch", "-a", "-t", strTime, newFileName), is(0));
+      FileStatus newStatus = lfs.getFileStatus(newFile);
+      // Verify that the access time is recorded correctly (and the
+      // modification time remains unchanged).
+      assertThat(newStatus.getAccessTime(), is(dateObj.getTime()));
+      assertThat(newStatus.getModificationTime(),
+          is(fstatus.getModificationTime()));
+    }
+
+    fstatus = lfs.getFileStatus(newFile);
+
+    {
+      String strTime = formatTimestamp(System.currentTimeMillis());
+      Date dateObj = parseTimestamp(strTime);
+
+      assertThat(
+          "Expected successful touch with a specified modification time",
+          shellRun("-touch", "-m", "-t", strTime, newFileName), is(0));
+      // Verify that the modification time is recorded correctly (and the
+      // access time remains unchanged).
+      FileStatus newStatus = lfs.getFileStatus(newFile);
+      assertThat(newStatus.getAccessTime(), is(fstatus.getAccessTime()));
+      assertThat(newStatus.getModificationTime(), is(dateObj.getTime()));
+    }
+
+    {
+      String strTime = formatTimestamp(System.currentTimeMillis());
+      Date dateObj = parseTimestamp(strTime);
+
+      assertThat("Expected successful touch with a specified timestamp",
+          shellRun("-touch", "-t", strTime, newFileName), is(0));
+
+      // Verify that both modification and access times are recorded correctly
+      FileStatus newStatus = lfs.getFileStatus(newFile);
+      assertThat(newStatus.getAccessTime(), is(dateObj.getTime()));
+      assertThat(newStatus.getModificationTime(), is(dateObj.getTime()));
+    }
+
+    {
+      String strTime = formatTimestamp(System.currentTimeMillis());
+      Date dateObj = parseTimestamp(strTime);
+
+      assertThat("Expected successful touch with a specified timestamp",
+          shellRun("-touch", "-a", "-m", "-t", strTime, newFileName), is(0));
+
+      // Verify that both modification and access times are recorded correctly
+      FileStatus newStatus = lfs.getFileStatus(newFile);
+      assertThat(newStatus.getAccessTime(), is(dateObj.getTime()));
+      assertThat(newStatus.getModificationTime(), is(dateObj.getTime()));
+    }
+
+    {
+      assertThat("Expected failed touch with a missing timestamp",
+          shellRun("-touch", "-t", newFileName), is(not(0)));
+    }
+  }
+
+  private String formatTimestamp(long timeInMillis) {
+    return (new Touch()).getDateFormat().format(new Date(timeInMillis));
+  }
+
+  private Date parseTimestamp(String tstamp) throws ParseException {
+    return (new Touch()).getDateFormat().parse(tstamp);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3712b79b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
index 6a3d53a..1798563 100644
--- a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
+++ b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
@@ -840,6 +840,57 @@
     </test>
 
     <test> <!-- TESTED -->
+      <description>help: help for touch</description>
+      <test-commands>
+        <command>-help touch</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-touch \[-a\] \[-m\] \[-t TIMESTAMP\] \[-c\] &lt;path&gt; \.\.\. :( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*Updates the access and modification times of the file specified by the &lt;path&gt; to( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*the current time. If the file does not exist, then a zero length file is created( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*at &lt;path&gt; with current time as the timestamp of that &lt;path&gt;.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*-a\s+Change only the access time( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*-m\s+Change only the modification time( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*-t\s+TIMESTAMP\s+Use specified timestamp \(in format yyyyMMddHHmmss\) instead of</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*current time( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*-c\s+Do not create any files( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
       <description>help: help for touchz</description>
       <test-commands>
         <command>-help touchz</command>




[07/14] hadoop git commit: HDFS-13658. Expose HighestPriorityLowRedundancy blocks statistics. Contributed by Kitti Nanasi.

Posted by ar...@apache.org.
HDFS-13658. Expose HighestPriorityLowRedundancy blocks statistics. Contributed by Kitti Nanasi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c0ac0a53
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c0ac0a53
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c0ac0a53

Branch: refs/heads/branch-3.1
Commit: c0ac0a533701a56b09b302a660c3233971512168
Parents: 366517b
Author: Xiao Chen <xi...@apache.org>
Authored: Wed Aug 8 10:36:44 2018 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Mon Aug 20 14:52:28 2018 -0700

----------------------------------------------------------------------
 .../hadoop-common/src/site/markdown/Metrics.md  |  2 +
 .../hadoop/hdfs/protocol/ECBlockGroupStats.java | 27 +++++++++++-
 .../hdfs/protocol/ReplicatedBlockStats.java     | 28 ++++++++++++-
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  | 21 ++++++++++
 .../src/main/proto/ClientNamenodeProtocol.proto |  3 ++
 .../federation/metrics/NamenodeBeanMetrics.java | 10 +++++
 .../server/federation/router/ErasureCoding.java | 13 ++++++
 .../server/blockmanagement/BlockManager.java    |  8 ++++
 .../blockmanagement/LowRedundancyBlocks.java    | 28 +++++++++++++
 .../hdfs/server/namenode/FSNamesystem.java      | 20 ++++++++-
 .../hdfs/server/namenode/NameNodeMXBean.java    | 18 ++++++++
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 10 +++++
 .../TestLowRedundancyBlockQueues.java           | 43 +++++++++++++-------
 .../namenode/metrics/TestNameNodeMetrics.java   | 12 ++++++
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 32 +++++++++++----
 15 files changed, 247 insertions(+), 28 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ac0a53/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index f1dbc50..4d59c6e 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -240,6 +240,8 @@ Each metrics record contains tags such as HAState and Hostname as additional inf
 | `StaleDataNodes` | Current number of DataNodes marked stale due to delayed heartbeat |
 | `NumStaleStorages` | Number of storages marked as content stale (after NameNode restart/failover before first block report is received) |
 | `MissingReplOneBlocks` | Current number of missing blocks with replication factor 1 |
+| `HighestPriorityLowRedundancyReplicatedBlocks` | Current number of non-corrupt, low redundancy replicated blocks with the highest risk of loss (have 0 or 1 replica). Will be recovered with the highest priority. |
+| `HighestPriorityLowRedundancyECBlocks` | Current number of non-corrupt, low redundancy EC blocks with the highest risk of loss. Will be recovered with the highest priority. |
 | `NumFilesUnderConstruction` | Current number of files under construction |
 | `NumActiveClients` | Current number of active clients holding lease |
 | `HAState` | (HA-only) Current state of the NameNode: initializing or active or standby or stopping state |
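
Both gauges land next to the existing FSNamesystem metrics, so they can be spot-checked straight off the NameNode's /jmx servlet; a minimal sketch, assuming a Hadoop 3 NameNode on the default HTTP port and a placeholder hostname:

    curl -s 'http://nn-host:9870/jmx' | grep HighestPriorityLowRedundancy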

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ac0a53/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupStats.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupStats.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupStats.java
index 9a8ad8c..3dde604 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupStats.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupStats.java
@@ -34,15 +34,26 @@ public final class ECBlockGroupStats {
   private final long missingBlockGroups;
   private final long bytesInFutureBlockGroups;
   private final long pendingDeletionBlocks;
+  private final Long highestPriorityLowRedundancyBlocks;
 
   public ECBlockGroupStats(long lowRedundancyBlockGroups,
       long corruptBlockGroups, long missingBlockGroups,
       long bytesInFutureBlockGroups, long pendingDeletionBlocks) {
+    this(lowRedundancyBlockGroups, corruptBlockGroups, missingBlockGroups,
+        bytesInFutureBlockGroups, pendingDeletionBlocks, null);
+  }
+
+  public ECBlockGroupStats(long lowRedundancyBlockGroups,
+      long corruptBlockGroups, long missingBlockGroups,
+      long bytesInFutureBlockGroups, long pendingDeletionBlocks,
+      Long highestPriorityLowRedundancyBlocks) {
     this.lowRedundancyBlockGroups = lowRedundancyBlockGroups;
     this.corruptBlockGroups = corruptBlockGroups;
     this.missingBlockGroups = missingBlockGroups;
     this.bytesInFutureBlockGroups = bytesInFutureBlockGroups;
     this.pendingDeletionBlocks = pendingDeletionBlocks;
+    this.highestPriorityLowRedundancyBlocks
+        = highestPriorityLowRedundancyBlocks;
   }
 
   public long getBytesInFutureBlockGroups() {
@@ -65,6 +76,14 @@ public final class ECBlockGroupStats {
     return pendingDeletionBlocks;
   }
 
+  public boolean hasHighestPriorityLowRedundancyBlocks() {
+    return getHighestPriorityLowRedundancyBlocks() != null;
+  }
+
+  public Long getHighestPriorityLowRedundancyBlocks() {
+    return highestPriorityLowRedundancyBlocks;
+  }
+
   @Override
   public String toString() {
     StringBuilder statsBuilder = new StringBuilder();
@@ -76,8 +95,12 @@ public final class ECBlockGroupStats {
         .append(", BytesInFutureBlockGroups=").append(
             getBytesInFutureBlockGroups())
         .append(", PendingDeletionBlocks=").append(
-            getPendingDeletionBlocks())
-        .append("]");
+            getPendingDeletionBlocks());
+    if (hasHighestPriorityLowRedundancyBlocks()) {
+      statsBuilder.append(", HighestPriorityLowRedundancyBlocks=")
+          .append(getHighestPriorityLowRedundancyBlocks());
+    }
+    statsBuilder.append("]");
     return statsBuilder.toString();
   }
 }
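
The nullable Long above deliberately mirrors the new optional protobuf field: a stats object decoded from an older NameNode carries null, and the has-check guards every read. A minimal caller-side sketch (method and variable names mine):

    import org.apache.hadoop.hdfs.protocol.ECBlockGroupStats;

    static void printHighestPriority(ECBlockGroupStats stats) {
      if (stats.hasHighestPriorityLowRedundancyBlocks()) {
        // Counter present: the NameNode is new enough to report it.
        System.out.println("HighestPriorityLowRedundancyBlocks = "
            + stats.getHighestPriorityLowRedundancyBlocks());
      } else {
        // Older NameNode: the optional proto field was absent.
        System.out.println("HighestPriorityLowRedundancyBlocks not reported");
      }
    }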

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ac0a53/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReplicatedBlockStats.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReplicatedBlockStats.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReplicatedBlockStats.java
index 49aaded..c210003 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReplicatedBlockStats.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReplicatedBlockStats.java
@@ -35,17 +35,29 @@ public final class ReplicatedBlockStats {
   private final long missingReplicationOneBlocks;
   private final long bytesInFutureBlocks;
   private final long pendingDeletionBlocks;
+  private final Long highestPriorityLowRedundancyBlocks;
 
   public ReplicatedBlockStats(long lowRedundancyBlocks,
       long corruptBlocks, long missingBlocks,
       long missingReplicationOneBlocks, long bytesInFutureBlocks,
       long pendingDeletionBlocks) {
+    this(lowRedundancyBlocks, corruptBlocks, missingBlocks,
+        missingReplicationOneBlocks, bytesInFutureBlocks, pendingDeletionBlocks,
+        null);
+  }
+
+  public ReplicatedBlockStats(long lowRedundancyBlocks,
+      long corruptBlocks, long missingBlocks,
+      long missingReplicationOneBlocks, long bytesInFutureBlocks,
+      long pendingDeletionBlocks, Long highestPriorityLowRedundancyBlocks) {
     this.lowRedundancyBlocks = lowRedundancyBlocks;
     this.corruptBlocks = corruptBlocks;
     this.missingBlocks = missingBlocks;
     this.missingReplicationOneBlocks = missingReplicationOneBlocks;
     this.bytesInFutureBlocks = bytesInFutureBlocks;
     this.pendingDeletionBlocks = pendingDeletionBlocks;
+    this.highestPriorityLowRedundancyBlocks
+        = highestPriorityLowRedundancyBlocks;
   }
 
   public long getLowRedundancyBlocks() {
@@ -72,6 +84,14 @@ public final class ReplicatedBlockStats {
     return pendingDeletionBlocks;
   }
 
+  public boolean hasHighestPriorityLowRedundancyBlocks() {
+    return getHighestPriorityLowRedundancyBlocks() != null;
+  }
+
+  public Long getHighestPriorityLowRedundancyBlocks() {
+    return highestPriorityLowRedundancyBlocks;
+  }
+
   @Override
   public String toString() {
     StringBuilder statsBuilder = new StringBuilder();
@@ -83,8 +103,12 @@ public final class ReplicatedBlockStats {
             getMissingReplicationOneBlocks())
         .append(", BytesInFutureBlocks=").append(getBytesInFutureBlocks())
         .append(", PendingDeletionBlocks=").append(
-            getPendingDeletionBlocks())
-        .append("]");
+            getPendingDeletionBlocks());
+    if (hasHighestPriorityLowRedundancyBlocks()) {
+      statsBuilder.append(", HighestPriorityLowRedundancyBlocks=").append(
+          getHighestPriorityLowRedundancyBlocks());
+    }
+    statsBuilder.append("]");
     return statsBuilder.toString();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ac0a53/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index 490ccb4..4a5a493 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -1990,6 +1990,13 @@ public class PBHelperClient {
 
   public static ReplicatedBlockStats convert(
       GetFsReplicatedBlockStatsResponseProto res) {
+    if (res.hasHighestPrioLowRedundancyBlocks()) {
+      return new ReplicatedBlockStats(res.getLowRedundancy(),
+          res.getCorruptBlocks(), res.getMissingBlocks(),
+          res.getMissingReplOneBlocks(), res.getBlocksInFuture(),
+          res.getPendingDeletionBlocks(),
+          res.getHighestPrioLowRedundancyBlocks());
+    }
     return new ReplicatedBlockStats(res.getLowRedundancy(),
         res.getCorruptBlocks(), res.getMissingBlocks(),
         res.getMissingReplOneBlocks(), res.getBlocksInFuture(),
@@ -1998,6 +2005,12 @@ public class PBHelperClient {
 
   public static ECBlockGroupStats convert(
       GetFsECBlockGroupStatsResponseProto res) {
+    if (res.hasHighestPrioLowRedundancyBlocks()) {
+      return new ECBlockGroupStats(res.getLowRedundancy(),
+          res.getCorruptBlocks(), res.getMissingBlocks(),
+          res.getBlocksInFuture(), res.getPendingDeletionBlocks(),
+          res.getHighestPrioLowRedundancyBlocks());
+    }
     return new ECBlockGroupStats(res.getLowRedundancy(),
         res.getCorruptBlocks(), res.getMissingBlocks(),
         res.getBlocksInFuture(), res.getPendingDeletionBlocks());
@@ -2432,6 +2445,10 @@ public class PBHelperClient {
         replicatedBlockStats.getBytesInFutureBlocks());
     result.setPendingDeletionBlocks(
         replicatedBlockStats.getPendingDeletionBlocks());
+    if (replicatedBlockStats.hasHighestPriorityLowRedundancyBlocks()) {
+      result.setHighestPrioLowRedundancyBlocks(
+          replicatedBlockStats.getHighestPriorityLowRedundancyBlocks());
+    }
     return result.build();
   }
 
@@ -2447,6 +2464,10 @@ public class PBHelperClient {
         ecBlockGroupStats.getBytesInFutureBlockGroups());
     result.setPendingDeletionBlocks(
         ecBlockGroupStats.getPendingDeletionBlocks());
+    if (ecBlockGroupStats.hasHighestPriorityLowRedundancyBlocks()) {
+      result.setHighestPrioLowRedundancyBlocks(
+          ecBlockGroupStats.getHighestPriorityLowRedundancyBlocks());
+    }
     return result.build();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ac0a53/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
index e51aeda..ae4c93e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
@@ -348,6 +348,8 @@ message GetFsReplicatedBlockStatsResponseProto {
   required uint64 missing_repl_one_blocks = 4;
   required uint64 blocks_in_future = 5;
   required uint64 pending_deletion_blocks = 6;
+  optional uint64 highest_prio_low_redundancy_blocks = 7;
+
 }
 
 message GetFsECBlockGroupStatsRequestProto { // no input parameters
@@ -359,6 +361,7 @@ message GetFsECBlockGroupStatsResponseProto {
   required uint64 missing_blocks = 3;
   required uint64 blocks_in_future = 4;
   required uint64 pending_deletion_blocks = 5;
+  optional uint64 highest_prio_low_redundancy_blocks = 6;
 }
 
 enum DatanodeReportTypeProto {  // type of the datanode report

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ac0a53/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
index 4d22ae7..e8ebf0d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
@@ -321,6 +321,16 @@ public class NamenodeBeanMetrics
   }
 
   @Override
+  public long getHighestPriorityLowRedundancyReplicatedBlocks() {
+    return 0;
+  }
+
+  @Override
+  public long getHighestPriorityLowRedundancyECBlocks() {
+    return 0;
+  }
+
+  @Override
   public String getCorruptFiles() {
     return "N/A";
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ac0a53/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ErasureCoding.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ErasureCoding.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ErasureCoding.java
index d2b2d50..480b232 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ErasureCoding.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ErasureCoding.java
@@ -185,12 +185,25 @@ public class ErasureCoding {
     long missingBlockGroups = 0;
     long bytesInFutureBlockGroups = 0;
     long pendingDeletionBlocks = 0;
+    long highestPriorityLowRedundancyBlocks = 0;
+    boolean hasHighestPriorityLowRedundancyBlocks = false;
+
     for (ECBlockGroupStats stats : allStats.values()) {
       lowRedundancyBlockGroups += stats.getLowRedundancyBlockGroups();
       corruptBlockGroups += stats.getCorruptBlockGroups();
       missingBlockGroups += stats.getMissingBlockGroups();
       bytesInFutureBlockGroups += stats.getBytesInFutureBlockGroups();
       pendingDeletionBlocks += stats.getPendingDeletionBlocks();
+      if (stats.hasHighestPriorityLowRedundancyBlocks()) {
+        hasHighestPriorityLowRedundancyBlocks = true;
+        highestPriorityLowRedundancyBlocks +=
+            stats.getHighestPriorityLowRedundancyBlocks();
+      }
+    }
+    if (hasHighestPriorityLowRedundancyBlocks) {
+      return new ECBlockGroupStats(lowRedundancyBlockGroups, corruptBlockGroups,
+          missingBlockGroups, bytesInFutureBlockGroups, pendingDeletionBlocks,
+          highestPriorityLowRedundancyBlocks);
     }
     return new ECBlockGroupStats(lowRedundancyBlockGroups, corruptBlockGroups,
         missingBlockGroups, bytesInFutureBlockGroups, pendingDeletionBlocks);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ac0a53/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 72ea1c0..bac89bf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -4428,6 +4428,14 @@ public class BlockManager implements BlockStatsMXBean {
     return this.neededReconstruction.getCorruptReplicationOneBlockSize();
   }
 
+  public long getHighestPriorityReplicatedBlockCount() {
+    return this.neededReconstruction.getHighestPriorityReplicatedBlockCount();
+  }
+
+  public long getHighestPriorityECBlockCount() {
+    return this.neededReconstruction.getHighestPriorityECBlockCount();
+  }
+
   public BlockInfo addBlockCollection(BlockInfo block,
       BlockCollection bc) {
     return blocksMap.addBlockCollection(block, bc);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ac0a53/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
index e3f228d..40ea980 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
@@ -92,6 +92,10 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
   private final LongAdder corruptReplicationOneBlocks = new LongAdder();
   private final LongAdder lowRedundancyECBlockGroups = new LongAdder();
   private final LongAdder corruptECBlockGroups = new LongAdder();
+  private final LongAdder highestPriorityLowRedundancyReplicatedBlocks
+      = new LongAdder();
+  private final LongAdder highestPriorityLowRedundancyECBlocks
+      = new LongAdder();
 
   /** Create an object. */
   LowRedundancyBlocks() {
@@ -162,6 +166,18 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
     return corruptReplicationOneBlocks.longValue();
   }
 
+  /**
+   * Return the number of low redundancy replicated blocks with the
+   * highest priority to recover.
+   */
+  long getHighestPriorityReplicatedBlockCount() {
+    return highestPriorityLowRedundancyReplicatedBlocks.longValue();
+  }
+
+  /**
+   * Return the number of low redundancy erasure coded blocks with the
+   * highest priority to recover.
+   */
+  long getHighestPriorityECBlockCount() {
+    return highestPriorityLowRedundancyECBlocks.longValue();
+  }
+
   /**
    *  Return low redundancy striped blocks excluding corrupt blocks.
    */
@@ -300,6 +316,9 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
       if (priLevel == QUEUE_WITH_CORRUPT_BLOCKS) {
         corruptECBlockGroups.increment();
       }
+      if (priLevel == QUEUE_HIGHEST_PRIORITY) {
+        highestPriorityLowRedundancyECBlocks.increment();
+      }
     } else {
       lowRedundancyBlocks.increment();
       if (priLevel == QUEUE_WITH_CORRUPT_BLOCKS) {
@@ -308,6 +327,9 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
           corruptReplicationOneBlocks.increment();
         }
       }
+      if (priLevel == QUEUE_HIGHEST_PRIORITY) {
+        highestPriorityLowRedundancyReplicatedBlocks.increment();
+      }
     }
   }
 
@@ -380,6 +402,9 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
       if (priLevel == QUEUE_WITH_CORRUPT_BLOCKS) {
         corruptECBlockGroups.decrement();
       }
+      if (priLevel == QUEUE_HIGHEST_PRIORITY) {
+        highestPriorityLowRedundancyECBlocks.decrement();
+      }
     } else {
       lowRedundancyBlocks.decrement();
       if (priLevel == QUEUE_WITH_CORRUPT_BLOCKS) {
@@ -391,6 +416,9 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
                   "should be non-negative";
         }
       }
+      if (priLevel == QUEUE_HIGHEST_PRIORITY) {
+        highestPriorityLowRedundancyReplicatedBlocks.decrement();
+      }
     }
   }
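
The new counters follow the class's existing LongAdder pattern, trading exact point-in-time snapshots for cheap concurrent updates on the block management paths; a standalone sketch of the pattern (class name mine):

    import java.util.concurrent.atomic.LongAdder;

    public class QueueCounterSketch {
      public static void main(String[] args) {
        LongAdder highestPriority = new LongAdder();
        highestPriority.increment();  // a block enters QUEUE_HIGHEST_PRIORITY
        highestPriority.decrement();  // the block is removed again
        // Reads are weakly consistent, which is fine for a metrics gauge.
        System.out.println(highestPriority.longValue());  // prints 0
      }
    }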
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ac0a53/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 2098252..5eef12b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4207,7 +4207,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     return new ReplicatedBlockStats(getLowRedundancyReplicatedBlocks(),
         getCorruptReplicatedBlocks(), getMissingReplicatedBlocks(),
         getMissingReplicationOneBlocks(), getBytesInFutureReplicatedBlocks(),
-        getPendingDeletionReplicatedBlocks());
+        getPendingDeletionReplicatedBlocks(),
+        getHighestPriorityLowRedundancyReplicatedBlocks());
   }
 
   /**
@@ -4219,7 +4220,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   ECBlockGroupStats getECBlockGroupStats() {
     return new ECBlockGroupStats(getLowRedundancyECBlockGroups(),
         getCorruptECBlockGroups(), getMissingECBlockGroups(),
-        getBytesInFutureECBlockGroups(), getPendingDeletionECBlocks());
+        getBytesInFutureECBlockGroups(), getPendingDeletionECBlocks(),
+        getHighestPriorityLowRedundancyECBlocks());
   }
 
   @Override // FSNamesystemMBean
@@ -4827,6 +4829,20 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   }
 
   @Override // ReplicatedBlocksMBean
+  @Metric({"HighestPriorityLowRedundancyReplicatedBlocks", "Number of " +
+      "replicated blocks which have the highest risk of loss."})
+  public long getHighestPriorityLowRedundancyReplicatedBlocks() {
+    return blockManager.getHighestPriorityReplicatedBlockCount();
+  }
+
+  @Override // ReplicatedBlocksMBean
+  @Metric({"HighestPriorityLowRedundancyECBlocks", "Number of erasure coded " +
+      "blocks which have the highest risk of loss."})
+  public long getHighestPriorityLowRedundancyECBlocks() {
+    return blockManager.getHighestPriorityECBlockCount();
+  }
+
+  @Override // ReplicatedBlocksMBean
   @Metric({"BytesInFutureReplicatedBlocks", "Total bytes in replicated " +
       "blocks with future generation stamp"})
   public long getBytesInFutureReplicatedBlocks() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ac0a53/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
index e4ed3a9..5c7bbbb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
@@ -163,6 +163,24 @@ public interface NameNodeMXBean {
   public long getNumberOfMissingBlocksWithReplicationFactorOne();
 
   /**
+   * Gets the total number of replicated low redundancy blocks on the cluster
+   * with the highest risk of loss.
+   *
+   * @return the total number of low redundancy blocks on the cluster
+   * with the highest risk of loss.
+   */
+  public long getHighestPriorityLowRedundancyReplicatedBlocks();
+
+  /**
+   * Gets the total number of erasure coded low redundancy blocks on the cluster
+   * with the highest risk of loss
+   *
+   * @return the total number of low redundancy blocks on the cluster
+   * with the highest risk of loss
+   */
+  public long getHighestPriorityLowRedundancyECBlocks();
+
+  /**
    * Gets the total number of snapshottable dirs in the system.
    *
    * @return the total number of snapshottable dirs in the system

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ac0a53/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index f793557..2333250 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -549,6 +549,11 @@ public class DFSAdmin extends FsShell {
         replicatedBlockStats.getMissingReplicaBlocks());
     System.out.println("\tMissing blocks (with replication factor 1): " +
         replicatedBlockStats.getMissingReplicationOneBlocks());
+    if (replicatedBlockStats.hasHighestPriorityLowRedundancyBlocks()) {
+      System.out.println("\tLow redundancy blocks with highest priority " +
+          "to recover: " +
+          replicatedBlockStats.getHighestPriorityLowRedundancyBlocks());
+    }
     System.out.println("\tPending deletion blocks: " +
         replicatedBlockStats.getPendingDeletionBlocks());
 
@@ -561,6 +566,11 @@ public class DFSAdmin extends FsShell {
         ecBlockGroupStats.getCorruptBlockGroups());
     System.out.println("\tMissing block groups: " +
         ecBlockGroupStats.getMissingBlockGroups());
+    if (ecBlockGroupStats.hasHighestPriorityLowRedundancyBlocks()) {
+      System.out.println("\tLow redundancy blocks with highest priority " +
+          "to recover: " +
+          ecBlockGroupStats.getHighestPriorityLowRedundancyBlocks());
+    }
     System.out.println("\tPending deletion blocks: " +
         ecBlockGroupStats.getPendingDeletionBlocks());
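
For reference, the corresponding slice of hdfs dfsadmin -report output once a NameNode reports the new counters (values illustrative, surrounding lines elided):

        Missing blocks (with replication factor 1): 0
        Low redundancy blocks with highest priority to recover: 1
        Pending deletion blocks: 0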
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ac0a53/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
index 97a5a6e..cf40c39 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
@@ -63,7 +63,8 @@ public class TestLowRedundancyBlockQueues {
   private void verifyBlockStats(LowRedundancyBlocks queues,
       int lowRedundancyReplicaCount, int corruptReplicaCount,
       int corruptReplicationOneCount, int lowRedundancyStripedCount,
-      int corruptStripedCount) {
+      int corruptStripedCount, int highestPriorityReplicatedBlockCount,
+      int highestPriorityECBlockCount) {
     assertEquals("Low redundancy replica count incorrect!",
         lowRedundancyReplicaCount, queues.getLowRedundancyBlocks());
     assertEquals("Corrupt replica count incorrect!",
@@ -81,6 +82,14 @@ public class TestLowRedundancyBlockQueues {
     assertEquals("LowRedundancyBlocks queue size incorrect!",
         (lowRedundancyReplicaCount + corruptReplicaCount +
         lowRedundancyStripedCount + corruptStripedCount), queues.size());
+    assertEquals("Highest priority replicated low redundancy " +
+            "blocks count is incorrect!",
+        highestPriorityReplicatedBlockCount,
+        queues.getHighestPriorityReplicatedBlockCount());
+    assertEquals("Highest priority erasure coded low redundancy " +
+            "blocks count is incorrect!",
+        highestPriorityECBlockCount,
+        queues.getHighestPriorityECBlockCount());
   }
 
   /**
@@ -100,42 +109,46 @@ public class TestLowRedundancyBlockQueues {
     // Add a block with a single entry
     assertAdded(queues, block1, 1, 0, 3);
     assertInLevel(queues, block1, LowRedundancyBlocks.QUEUE_HIGHEST_PRIORITY);
-    verifyBlockStats(queues, 1, 0, 0, 0, 0);
+    verifyBlockStats(queues, 1, 0, 0, 0, 0, 1, 0);
 
     // Repeated additions fail
     assertFalse(queues.add(block1, 1, 0, 0, 3));
-    verifyBlockStats(queues, 1, 0, 0, 0, 0);
+    verifyBlockStats(queues, 1, 0, 0, 0, 0, 1, 0);
 
     // Add a second block with two replicas
     assertAdded(queues, block2, 2, 0, 3);
     assertInLevel(queues, block2, LowRedundancyBlocks.QUEUE_LOW_REDUNDANCY);
-    verifyBlockStats(queues, 2, 0, 0, 0, 0);
+    verifyBlockStats(queues, 2, 0, 0, 0, 0, 1, 0);
 
     // Now try to add a block that is corrupt
     assertAdded(queues, block_corrupt, 0, 0, 3);
     assertInLevel(queues, block_corrupt,
                   LowRedundancyBlocks.QUEUE_WITH_CORRUPT_BLOCKS);
-    verifyBlockStats(queues, 2, 1, 0, 0, 0);
+    verifyBlockStats(queues, 2, 1, 0, 0, 0, 1, 0);
 
     // Insert a very insufficiently redundancy block
     assertAdded(queues, block_very_low_redundancy, 4, 0, 25);
     assertInLevel(queues, block_very_low_redundancy,
                   LowRedundancyBlocks.QUEUE_VERY_LOW_REDUNDANCY);
-    verifyBlockStats(queues, 3, 1, 0, 0, 0);
+    verifyBlockStats(queues, 3, 1, 0, 0, 0, 1, 0);
 
     // Insert a corrupt block with replication factor 1
     assertAdded(queues, block_corrupt_repl_one, 0, 0, 1);
-    verifyBlockStats(queues, 3, 2, 1, 0, 0);
+    verifyBlockStats(queues, 3, 2, 1, 0, 0, 1, 0);
 
     // Bump up the expected count for corrupt replica one block from 1 to 3
     queues.update(block_corrupt_repl_one, 0, 0, 0, 3, 0, 2);
-    verifyBlockStats(queues, 3, 2, 0, 0, 0);
+    verifyBlockStats(queues, 3, 2, 0, 0, 0, 1, 0);
 
     // Reduce the expected replicas to 1
     queues.update(block_corrupt, 0, 0, 0, 1, 0, -2);
-    verifyBlockStats(queues, 3, 2, 1, 0, 0);
+    verifyBlockStats(queues, 3, 2, 1, 0, 0, 1, 0);
     queues.update(block_very_low_redundancy, 0, 0, 0, 1, -4, -24);
-    verifyBlockStats(queues, 2, 3, 2, 0, 0);
+    verifyBlockStats(queues, 2, 3, 2, 0, 0, 1, 0);
+
+    // Reduce the expected replicas to 1 for block1
+    queues.update(block1, 1, 0, 0, 1, 0, 0);
+    verifyBlockStats(queues, 2, 3, 2, 0, 0, 0, 0);
   }
 
   @Test
@@ -145,12 +158,12 @@ public class TestLowRedundancyBlockQueues {
     assertAdded(queues, corruptBlock, 0, 0, 3);
     assertInLevel(queues, corruptBlock,
         LowRedundancyBlocks.QUEUE_WITH_CORRUPT_BLOCKS);
-    verifyBlockStats(queues, 0, 1, 0, 0, 0);
+    verifyBlockStats(queues, 0, 1, 0, 0, 0, 0, 0);
 
     // Remove with wrong priority
     queues.remove(corruptBlock, LowRedundancyBlocks.QUEUE_LOW_REDUNDANCY);
     // Verify the number of corrupt block is decremented
-    verifyBlockStats(queues, 0, 0, 0, 0, 0);
+    verifyBlockStats(queues, 0, 0, 0, 0, 0, 0, 0);
   }
 
   @Test
@@ -186,17 +199,17 @@ public class TestLowRedundancyBlockQueues {
         assertInLevel(queues, block,
             LowRedundancyBlocks.QUEUE_LOW_REDUNDANCY);
       }
-      verifyBlockStats(queues, 0, 0, 0, numUR, 0);
+      verifyBlockStats(queues, 0, 0, 0, numUR, 0, 0, 1);
     }
 
     // add a corrupted block
     BlockInfo block_corrupt = genStripedBlockInfo(-10, numBytes);
     assertEquals(numCorrupt, queues.getCorruptBlockSize());
-    verifyBlockStats(queues, 0, 0, 0, numUR, numCorrupt);
+    verifyBlockStats(queues, 0, 0, 0, numUR, numCorrupt, 0, 1);
 
     assertAdded(queues, block_corrupt, dataBlkNum - 1, 0, groupSize);
     numCorrupt++;
-    verifyBlockStats(queues, 0, 0, 0, numUR, numCorrupt);
+    verifyBlockStats(queues, 0, 0, 0, numUR, numCorrupt, 0, 1);
 
     assertInLevel(queues, block_corrupt,
         LowRedundancyBlocks.QUEUE_WITH_CORRUPT_BLOCKS);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ac0a53/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index 05cf2ea..57a1b01 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
@@ -412,10 +412,12 @@ public class TestNameNodeMetrics {
     // Verify replica metrics
     assertGauge("LowRedundancyReplicatedBlocks", 0L, rb);
     assertGauge("CorruptReplicatedBlocks", 0L, rb);
+    assertGauge("HighestPriorityLowRedundancyReplicatedBlocks", 0L, rb);
 
     // Verify striped block groups metrics
     assertGauge("LowRedundancyECBlockGroups", 0L, rb);
     assertGauge("CorruptECBlockGroups", 0L, rb);
+    assertGauge("HighestPriorityLowRedundancyECBlocks", 0L, rb);
   }
 
   /**
@@ -492,9 +494,11 @@ public class TestNameNodeMetrics {
     // Verify replicated blocks metrics
     assertGauge("LowRedundancyReplicatedBlocks", 1L, rb);
     assertGauge("CorruptReplicatedBlocks", 1L, rb);
+    assertGauge("HighestPriorityLowRedundancyReplicatedBlocks", 1L, rb);
     // Verify striped blocks metrics
     assertGauge("LowRedundancyECBlockGroups", 0L, rb);
     assertGauge("CorruptECBlockGroups", 0L, rb);
+    assertGauge("HighestPriorityLowRedundancyECBlocks", 0L, rb);
 
     verifyAggregatedMetricsTally();
 
@@ -517,9 +521,11 @@ public class TestNameNodeMetrics {
     // Verify replicated blocks metrics
     assertGauge("LowRedundancyReplicatedBlocks", 0L, rb);
     assertGauge("CorruptReplicatedBlocks", 0L, rb);
+    assertGauge("HighestPriorityLowRedundancyReplicatedBlocks", 0L, rb);
     // Verify striped blocks metrics
     assertGauge("LowRedundancyECBlockGroups", 0L, rb);
     assertGauge("CorruptECBlockGroups", 0L, rb);
+    assertGauge("HighestPriorityLowRedundancyECBlocks", 0L, rb);
 
     verifyAggregatedMetricsTally();
 
@@ -580,9 +586,11 @@ public class TestNameNodeMetrics {
     // Verify replica metrics
     assertGauge("LowRedundancyReplicatedBlocks", 0L, rb);
     assertGauge("CorruptReplicatedBlocks", 0L, rb);
+    assertGauge("HighestPriorityLowRedundancyReplicatedBlocks", 0L, rb);
     // Verify striped block groups metrics
     assertGauge("LowRedundancyECBlockGroups", 1L, rb);
     assertGauge("CorruptECBlockGroups", 1L, rb);
+    assertGauge("HighestPriorityLowRedundancyECBlocks", 1L, rb);
 
     verifyAggregatedMetricsTally();
 
@@ -602,9 +610,11 @@ public class TestNameNodeMetrics {
     // Verify replicated blocks metrics
     assertGauge("LowRedundancyReplicatedBlocks", 0L, rb);
     assertGauge("CorruptReplicatedBlocks", 0L, rb);
+    assertGauge("HighestPriorityLowRedundancyReplicatedBlocks", 0L, rb);
     // Verify striped blocks metrics
     assertGauge("LowRedundancyECBlockGroups", 0L, rb);
     assertGauge("CorruptECBlockGroups", 0L, rb);
+    assertGauge("HighestPriorityLowRedundancyECBlocks", 0L, rb);
 
     verifyAggregatedMetricsTally();
 
@@ -666,6 +676,8 @@ public class TestNameNodeMetrics {
     assertGauge("UnderReplicatedBlocks", 1L, rb);
     assertGauge("MissingBlocks", 1L, rb);
     assertGauge("MissingReplOneBlocks", 1L, rb);
+    assertGauge("HighestPriorityLowRedundancyReplicatedBlocks", 0L, rb);
+    assertGauge("HighestPriorityLowRedundancyECBlocks", 0L, rb);
     fs.delete(file, true);
     waitForDnMetricValue(NS_METRICS, "UnderReplicatedBlocks", 0L);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ac0a53/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index 647327c..5e4709e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -579,7 +579,7 @@ public class TestDFSAdmin {
       // Verify report command for all counts to be zero
       resetStream();
       assertEquals(0, ToolRunner.run(dfsAdmin, new String[] {"-report"}));
-      verifyNodesAndCorruptBlocks(numDn, numDn, 0, 0, client);
+      verifyNodesAndCorruptBlocks(numDn, numDn, 0, 0, client, 0L, 0L);
 
       final short replFactor = 1;
       final long fileLength = 512L;
@@ -614,7 +614,7 @@ public class TestDFSAdmin {
       // Verify report command for all counts to be zero
       resetStream();
       assertEquals(0, ToolRunner.run(dfsAdmin, new String[] {"-report"}));
-      verifyNodesAndCorruptBlocks(numDn, numDn, 0, 0, client);
+      verifyNodesAndCorruptBlocks(numDn, numDn, 0, 0, client, 0L, 0L);
 
       // Choose a DataNode to shutdown
       final List<DataNode> datanodes = miniCluster.getDataNodes();
@@ -636,7 +636,7 @@ public class TestDFSAdmin {
 
       // Verify report command to show dead DataNode
       assertEquals(0, ToolRunner.run(dfsAdmin, new String[] {"-report"}));
-      verifyNodesAndCorruptBlocks(numDn, numDn - 1, 0, 0, client);
+      verifyNodesAndCorruptBlocks(numDn, numDn - 1, 0, 0, client, 0L, 1L);
 
       // Corrupt the replicated block
       final int blockFilesCorrupted = miniCluster
@@ -664,7 +664,7 @@ public class TestDFSAdmin {
       // verify report command for corrupt replicated block
       resetStream();
       assertEquals(0, ToolRunner.run(dfsAdmin, new String[] {"-report"}));
-      verifyNodesAndCorruptBlocks(numDn, numDn - 1, 1, 0, client);
+      verifyNodesAndCorruptBlocks(numDn, numDn - 1, 1, 0, client, 0L, 1L);
 
       lbs = miniCluster.getFileSystem().getClient().
           getNamenode().getBlockLocations(
@@ -689,7 +689,7 @@ public class TestDFSAdmin {
       // and EC block group
       resetStream();
       assertEquals(0, ToolRunner.run(dfsAdmin, new String[] {"-report"}));
-      verifyNodesAndCorruptBlocks(numDn, numDn - 1, 1, 1, client);
+      verifyNodesAndCorruptBlocks(numDn, numDn - 1, 1, 1, client, 0L, 0L);
     }
   }
 
@@ -834,7 +834,10 @@ public class TestDFSAdmin {
       final int numLiveDn,
       final int numCorruptBlocks,
       final int numCorruptECBlockGroups,
-      final DFSClient client) throws IOException {
+      final DFSClient client,
+      final Long highestPriorityLowRedundancyReplicatedBlocks,
+      final Long highestPriorityLowRedundancyECBlocks)
+      throws IOException {
 
     /* init vars */
     final String outStr = scanIntoString(out);
@@ -847,12 +850,23 @@ public class TestDFSAdmin {
     final String expectedCorruptedECBlockGroupsStr = String.format(
         "Block groups with corrupt internal blocks: %d",
         numCorruptECBlockGroups);
+    final String highestPriorityLowRedundancyReplicatedBlocksStr =
+        String.format(
+            "\tLow redundancy blocks with highest priority to recover: %d",
+            highestPriorityLowRedundancyReplicatedBlocks);
+    final String highestPriorityLowRedundancyECBlocksStr =
+        String.format(
+            "\tLow redundancy blocks with highest priority to recover: %d",
+            highestPriorityLowRedundancyECBlocks);
 
     // verify nodes and corrupt blocks
     assertThat(outStr, is(allOf(
         containsString(expectedLiveNodesStr),
         containsString(expectedCorruptedBlocksStr),
-        containsString(expectedCorruptedECBlockGroupsStr))));
+        containsString(expectedCorruptedECBlockGroupsStr),
+        containsString(highestPriorityLowRedundancyReplicatedBlocksStr),
+        containsString(highestPriorityLowRedundancyECBlocksStr))));
 
     assertEquals(
         numDn,
@@ -867,8 +881,12 @@ public class TestDFSAdmin {
         client.getCorruptBlocksCount());
     assertEquals(numCorruptBlocks, client.getNamenode()
         .getReplicatedBlockStats().getCorruptBlocks());
+    assertEquals(highestPriorityLowRedundancyReplicatedBlocks,
+        client.getNamenode().getReplicatedBlockStats()
+            .getHighestPriorityLowRedundancyBlocks());
     assertEquals(numCorruptECBlockGroups, client.getNamenode()
         .getECBlockGroupStats().getCorruptBlockGroups());
+    assertEquals(highestPriorityLowRedundancyECBlocks, client.getNamenode()
+        .getECBlockGroupStats().getHighestPriorityLowRedundancyBlocks());
   }
 
   @Test




[12/14] hadoop git commit: HDFS-10240. Race between close/recoverLease leads to missing block. Contributed by Jinglun, zhouyingchao and Wei-Chiu Chuang.

Posted by ar...@apache.org.
HDFS-10240. Race between close/recoverLease leads to missing block. Contributed by Jinglun, zhouyingchao and Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86565005
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86565005
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86565005

Branch: refs/heads/branch-3.1
Commit: 865650052b07c8a20d51306202354ac770ed36d5
Parents: 0424715
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Thu Aug 16 16:29:38 2018 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Mon Aug 20 14:57:40 2018 -0700

----------------------------------------------------------------------
 .../hdfs/server/blockmanagement/BlockInfo.java  |  4 ++
 .../server/blockmanagement/BlockManager.java    |  4 ++
 .../hdfs/server/datanode/BPServiceActor.java    |  3 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   | 10 +++
 .../apache/hadoop/hdfs/TestLeaseRecovery2.java  | 65 ++++++++++++++++++++
 .../hdfs/server/datanode/DataNodeTestUtils.java |  3 +
 6 files changed, 88 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/86565005/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index 111ade1..43f4f47 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -262,6 +262,10 @@ public abstract class BlockInfo extends Block
     return getBlockUCState().equals(BlockUCState.COMPLETE);
   }
 
+  public boolean isUnderRecovery() {
+    return getBlockUCState().equals(BlockUCState.UNDER_RECOVERY);
+  }
+
   public final boolean isCompleteOrCommitted() {
     final BlockUCState state = getBlockUCState();
     return state.equals(BlockUCState.COMPLETE) ||

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86565005/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index bac89bf..6ab237f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -971,6 +971,10 @@ public class BlockManager implements BlockStatsMXBean {
       return false; // no blocks in file yet
     if(lastBlock.isComplete())
       return false; // already completed (e.g. by syncBlock)
+    if(lastBlock.isUnderRecovery()) {
+      throw new IOException("Commit or complete block " + commitBlock +
+          ", whereas it is under recovery.");
+    }
     
     final boolean committed = commitBlock(lastBlock, commitBlock);
     if (committed && lastBlock.isStriped()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86565005/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index a94d2df..6c167f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -682,7 +682,8 @@ class BPServiceActor implements Runnable {
             }
           }
         }
-        if (ibrManager.sendImmediately() || sendHeartbeat) {
+        if (!dn.areIBRDisabledForTests() &&
+            (ibrManager.sendImmediately() || sendHeartbeat)) {
           ibrManager.sendIBRs(bpNamenode, bpRegistration,
               bpos.getBlockPoolId());
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86565005/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 4823358..ade2b11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -331,6 +331,7 @@ public class DataNode extends ReconfigurableBase
   ThreadGroup threadGroup = null;
   private DNConf dnConf;
   private volatile boolean heartbeatsDisabledForTests = false;
+  private volatile boolean ibrDisabledForTests = false;
   private volatile boolean cacheReportsDisabledForTests = false;
   private DataStorage storage = null;
 
@@ -1334,6 +1335,15 @@ public class DataNode extends ReconfigurableBase
   }
 
   @VisibleForTesting
+  void setIBRDisabledForTest(boolean disabled) {
+    this.ibrDisabledForTests = disabled;
+  }
+
+  @VisibleForTesting
+  boolean areIBRDisabledForTests() {
+    return this.ibrDisabledForTests;
+  }
+
   void setCacheReportsDisabledForTest(boolean disabled) {
     this.cacheReportsDisabledForTests = disabled;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86565005/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
index a96d8b3..940e13e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
@@ -25,6 +25,7 @@ import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.spy;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -164,6 +165,70 @@ public class TestLeaseRecovery2 {
   }
 
   @Test
+  public void testCloseWhileRecoverLease() throws Exception {
+    // test recoverLease
+    // set the soft limit to be 1 hour but recoverLease should
+    // close the file immediately
+    cluster.setLeasePeriod(LONG_LEASE_PERIOD, LONG_LEASE_PERIOD);
+    int size = AppendTestUtil.nextInt(FILE_SIZE);
+    String filestr = "/testCloseWhileRecoverLease";
+
+    AppendTestUtil.LOG.info("filestr=" + filestr);
+    Path filepath = new Path(filestr);
+    FSDataOutputStream stm = dfs.create(filepath, true, BUF_SIZE,
+        REPLICATION_NUM, BLOCK_SIZE);
+    assertTrue(dfs.dfs.exists(filestr));
+
+    // hflush file
+    AppendTestUtil.LOG.info("hflush");
+    stm.hflush();
+
+    // Pause DN block report.
+    // Let client recover lease, and then close the file, and then let DN
+    // report blocks.
+    ArrayList<DataNode> dataNodes = cluster.getDataNodes();
+    for (DataNode dn: dataNodes) {
+      DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
+    }
+
+    LOG.info("pause IBR");
+    for (DataNode dn: dataNodes) {
+      DataNodeTestUtils.pauseIBR(dn);
+    }
+
+    AppendTestUtil.LOG.info("size=" + size);
+    stm.write(buffer, 0, size);
+
+    // hflush file
+    AppendTestUtil.LOG.info("hflush");
+    stm.hflush();
+
+    LOG.info("recover lease");
+    dfs.recoverLease(filepath);
+    try {
+      stm.close();
+      fail("close() should fail because the file is under recovery.");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains(
+          "whereas it is under recovery", ioe);
+    }
+
+    for (DataNode dn: dataNodes) {
+      DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
+    }
+
+    LOG.info("trigger heartbeats");
+    // resume DN block report
+    for (DataNode dn: dataNodes) {
+      DataNodeTestUtils.triggerHeartbeat(dn);
+    }
+
+    stm.close();
+    assertEquals(0, cluster.getNamesystem().getBlockManager().
+        getMissingBlocksCount());
+  }
+
+  @Test
   public void testLeaseRecoverByAnotherUser() throws Exception {
     byte [] actual = new byte[FILE_SIZE];
     cluster.setLeasePeriod(SHORT_LEASE_PERIOD, LONG_LEASE_PERIOD);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86565005/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
index 19d9dfc..25eca88 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
@@ -98,6 +98,9 @@ public class DataNodeTestUtils {
     }
   }
 
+  public static void pauseIBR(DataNode dn) {
+    dn.setIBRDisabledForTest(true);
+  }
   public static InterDatanodeProtocol createInterDatanodeProtocolProxy(
       DataNode dn, DatanodeID datanodeid, final Configuration conf,
       boolean connectToDnViaHostname) throws IOException {




[03/14] hadoop git commit: HDFS-13792. Fix FSN read/write lock metrics name. Contributed by Chao Sun.

Posted by ar...@apache.org.
HDFS-13792. Fix FSN read/write lock metrics name. Contributed by Chao Sun.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e4b75ad5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e4b75ad5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e4b75ad5

Branch: refs/heads/branch-3.1
Commit: e4b75ad583fe7d78a529288992f3721f29213f89
Parents: 0d155de
Author: Yiqun Lin <yq...@apache.org>
Authored: Tue Aug 7 09:32:51 2018 +0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Mon Aug 20 14:50:47 2018 -0700

----------------------------------------------------------------------
 .../hadoop-common/src/site/markdown/Metrics.md                 | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4b75ad5/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 676ab0b..f1dbc50 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -252,8 +252,10 @@ Each metrics record contains tags such as HAState and Hostname as additional inf
 | `NumInMaintenanceLiveDataNodes` | Number of live Datanodes which are in maintenance state |
 | `NumInMaintenanceDeadDataNodes` | Number of dead Datanodes which are in maintenance state |
 | `NumEnteringMaintenanceDataNodes` | Number of Datanodes that are entering the maintenance state |
-| `FSN(Read/Write)Lock`*OperationName*`NumOps` | Total number of acquiring lock by operations |
-| `FSN(Read/Write)Lock`*OperationName*`AvgTime` | Average time of holding the lock by operations in milliseconds |
+| `FSN(Read/Write)Lock`*OperationName*`NanosNumOps` | Total number of lock acquisitions by the named operation |
+| `FSN(Read/Write)Lock`*OperationName*`NanosAvgTime` | Average lock hold time for the named operation, in nanoseconds |
+| `FSN(Read/Write)LockOverallNanosNumOps`  | Total number of lock acquisitions across all operations |
+| `FSN(Read/Write)LockOverallNanosAvgTime` | Average lock hold time across all operations, in nanoseconds |
 
 JournalNode
 -----------
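
One way to confirm the renamed metric names on a running NameNode is to
scan its MBeans over JMX. A sketch only, to be run inside the NameNode
JVM (or adapted to a remote JMX connector); the bean pattern is an
assumption based on the usual metrics2 naming, so verify against the
NameNode's /jmx endpoint:

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class FindFsnLockMetrics {
  public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    // Scan all NameNode beans rather than assuming one specific bean name.
    for (ObjectName on :
        mbs.queryNames(new ObjectName("Hadoop:service=NameNode,*"), null)) {
      try {
        Object avg = mbs.getAttribute(on, "FSNReadLockOverallNanosAvgTime");
        System.out.println(on + " -> " + avg);
      } catch (Exception attributeMissing) {
        // This bean does not carry the lock metrics; keep scanning.
      }
    }
  }
}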




[05/14] hadoop git commit: HDFS-13785. EC: 'removePolicy' is not working for built-in/system Erasure Code policies. Contributed by Ayush Saxena.

Posted by ar...@apache.org.
HDFS-13785. EC: 'removePolicy' is not working for built-in/system Erasure Code policies. Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a517ee4f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a517ee4f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a517ee4f

Branch: refs/heads/branch-3.1
Commit: a517ee4f5be39828bf18795462457fdf1bb0db00
Parents: 7540699
Author: Vinayakumar B <vi...@apache.org>
Authored: Wed Aug 8 12:42:20 2018 +0530
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Mon Aug 20 14:51:53 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   |  4 ++--
 .../src/site/markdown/HDFSErasureCoding.md      |  4 ++--
 .../test/resources/testErasureCodingConf.xml    | 22 +++++++++++++++++++-
 3 files changed, 25 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a517ee4f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
index 9b9fe14..56706b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
@@ -154,7 +154,7 @@ public class ECAdmin extends Configured implements Tool {
       listing.addRow("<file>",
           "The path of the xml file which defines the EC policies to add");
       return getShortUsage() + "\n" +
-          "Add a list of erasure coding policies.\n" +
+          "Add a list of user defined erasure coding policies.\n" +
           listing.toString();
     }
 
@@ -268,7 +268,7 @@ public class ECAdmin extends Configured implements Tool {
       TableListing listing = AdminHelper.getOptionDescriptionListing();
       listing.addRow("<policy>", "The name of the erasure coding policy");
       return getShortUsage() + "\n" +
-          "Remove an erasure coding policy.\n" +
+          "Remove an user defined erasure coding policy.\n" +
           listing.toString();
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a517ee4f/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index f3b920f..2e8cbbd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -203,7 +203,7 @@ Below are the details about each command.
 
  *  `[-addPolicies -policyFile <file>]`
 
-     Add a list of erasure coding policies. Please refer etc/hadoop/user_ec_policies.xml.template for the example policy file. The maximum cell size is defined in property 'dfs.namenode.ec.policies.max.cellsize' with the default value 4MB. Currently HDFS allows the user to add 64 policies in total, and the added policy ID is in range of 64 to 127. Adding policy will fail if there are already 64 policies added.
+     Add a list of user-defined erasure coding policies. Please refer to etc/hadoop/user_ec_policies.xml.template for an example policy file. The maximum cell size is defined in the property 'dfs.namenode.ec.policies.max.cellsize' with a default value of 4MB. Currently HDFS allows the user to add 64 policies in total, and the added policy IDs are in the range 64 to 127. Adding a policy will fail if there are already 64 policies added.
 
  *  `[-listCodecs]`
 
@@ -211,7 +211,7 @@ Below are the details about each command.
 
 *  `[-removePolicy -policy <policyName>]`
 
-     Remove an erasure coding policy.
+     Remove a user-defined erasure coding policy.
 
 *  `[-enablePolicy -policy <policyName>]`
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a517ee4f/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
index 2f7a6a7..9070367 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
@@ -154,7 +154,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Add a list of erasure coding policies</expected-output>
+          <expected-output>Add a list of user-defined erasure coding policies</expected-output>
         </comparator>
         <comparator>
           <type>SubstringComparator</type>
@@ -164,6 +164,26 @@
     </test>
 
     <test>
+      <description>help: removePolicy command</description>
+      <test-commands>
+        <ec-admin-command>-fs NAMENODE -help removePolicy
+        </ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Remove a user-defined erasure coding policy</expected-output>
+        </comparator>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>[-removePolicy -policy &lt;policy&gt;]</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
       <description>help: enablePolicy command</description>
       <test-commands>
         <ec-admin-command>-fs NAMENODE -help enablePolicy</ec-admin-command>
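
The distinction the new help text draws also holds at the API level: only
user-defined policies can be removed. A sketch of the expected behavior
(the getFileSystem() setup is an assumption, and RS-6-3-1024k is just one
of the built-in policies):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class RemovePolicyDemo {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs = (DistributedFileSystem)
        new Path("/").getFileSystem(new Configuration());
    // Expected to fail: RS-6-3-1024k is a built-in/system policy. Only
    // user-defined policies (added via -addPolicies, IDs 64 to 127) can
    // be removed.
    dfs.removeErasureCodingPolicy("RS-6-3-1024k");
  }
}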




[10/14] hadoop git commit: HDFS-13217. Audit log all EC policy names during addErasureCodingPolicies. Contributed by liaoyuxiangqin.

Posted by ar...@apache.org.
HDFS-13217. Audit log all EC policy names during addErasureCodingPolicies. Contributed by liaoyuxiangqin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/81fbfe50
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/81fbfe50
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/81fbfe50

Branch: refs/heads/branch-3.1
Commit: 81fbfe500e4ac1c57c681db20f620336a072898e
Parents: c15853f
Author: Xiao Chen <xi...@apache.org>
Authored: Wed Aug 15 09:22:24 2018 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Mon Aug 20 14:56:47 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java  | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/81fbfe50/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 5eef12b..f6c8c94 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -7475,9 +7475,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       ErasureCodingPolicy[] policies, final boolean logRetryCache)
       throws IOException {
     final String operationName = "addErasureCodingPolicies";
-    String addECPolicyName = "";
+    List<String> addECPolicyNames = new ArrayList<>(policies.length);
     checkOperation(OperationCategory.WRITE);
-    List<AddErasureCodingPolicyResponse> responses = new ArrayList<>();
+    List<AddErasureCodingPolicyResponse> responses =
+        new ArrayList<>(policies.length);
     boolean success = false;
     writeLock();
     try {
@@ -7488,7 +7489,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
           ErasureCodingPolicy newPolicy =
               FSDirErasureCodingOp.addErasureCodingPolicy(this, policy,
                   logRetryCache);
-          addECPolicyName = newPolicy.getName();
+          addECPolicyNames.add(newPolicy.getName());
           responses.add(new AddErasureCodingPolicyResponse(newPolicy));
         } catch (HadoopIllegalArgumentException e) {
           responses.add(new AddErasureCodingPolicyResponse(policy, e));
@@ -7501,7 +7502,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       if (success) {
         getEditLog().logSync();
       }
-      logAuditEvent(success, operationName, addECPolicyName, null, null);
+      logAuditEvent(success, operationName, addECPolicyNames.toString(),
+          null, null);
     }
   }
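
With this change the audit log's src field carries the default List
toString() rendering of every policy name from one call. A tiny
illustration (the policy names below are hypothetical):

import java.util.Arrays;
import java.util.List;

public class AuditNameFormat {
  public static void main(String[] args) {
    List<String> addECPolicyNames =
        Arrays.asList("XOR-2-1-128k", "RS-3-2-1024k");  // hypothetical
    // This is the string that ends up in the audit event:
    System.out.println(addECPolicyNames.toString());
    // prints: [XOR-2-1-128k, RS-3-2-1024k]
  }
}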
 




[13/14] hadoop git commit: HADOOP-15655. Enhance KMS client retry behavior. Contributed by Kitti Nanasi.

Posted by ar...@apache.org.
HADOOP-15655. Enhance KMS client retry behavior. Contributed by Kitti Nanasi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a630a27c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a630a27c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a630a27c

Branch: refs/heads/branch-3.1
Commit: a630a27c53107322a72f9b76e395c4537b09c3fc
Parents: 8656500
Author: Xiao Chen <xi...@apache.org>
Authored: Thu Aug 16 22:32:32 2018 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Mon Aug 20 14:57:51 2018 -0700

----------------------------------------------------------------------
 .../key/kms/LoadBalancingKMSClientProvider.java |  43 ++---
 .../kms/TestLoadBalancingKMSClientProvider.java | 181 ++++++++++++++++++-
 2 files changed, 193 insertions(+), 31 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a630a27c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
index 9677b0d..e0ffdb1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
@@ -113,8 +113,8 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
     return providers;
   }
 
-  private <T> T doOp(ProviderCallable<T> op, int currPos)
-      throws IOException {
+  private <T> T doOp(ProviderCallable<T> op, int currPos,
+      boolean isIdempotent) throws IOException {
     if (providers.length == 0) {
       throw new IOException("No providers configured !");
     }
@@ -143,7 +143,7 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
         }
         RetryAction action = null;
         try {
-          action = retryPolicy.shouldRetry(ioe, 0, numFailovers, false);
+          action = retryPolicy.shouldRetry(ioe, 0, numFailovers, isIdempotent);
         } catch (Exception e) {
           if (e instanceof IOException) {
             throw (IOException)e;
@@ -201,7 +201,7 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
       public Token<?>[] call(KMSClientProvider provider) throws IOException {
         return provider.addDelegationTokens(renewer, credentials);
       }
-    }, nextIdx());
+    }, nextIdx(), false);
   }
 
   @Override
@@ -211,7 +211,7 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
       public Long call(KMSClientProvider provider) throws IOException {
         return provider.renewDelegationToken(token);
       }
-    }, nextIdx());
+    }, nextIdx(), false);
   }
 
   @Override
@@ -222,7 +222,7 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
         provider.cancelDelegationToken(token);
         return null;
       }
-    }, nextIdx());
+    }, nextIdx(), false);
   }
 
   // This request is sent to all providers in the load-balancing group
@@ -275,7 +275,7 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
             throws IOException, GeneralSecurityException {
           return provider.generateEncryptedKey(encryptionKeyName);
         }
-      }, nextIdx());
+      }, nextIdx(), true);
     } catch (WrapperException we) {
       if (we.getCause() instanceof GeneralSecurityException) {
         throw (GeneralSecurityException) we.getCause();
@@ -295,7 +295,7 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
             throws IOException, GeneralSecurityException {
           return provider.decryptEncryptedKey(encryptedKeyVersion);
         }
-      }, nextIdx());
+      }, nextIdx(), true);
     } catch (WrapperException we) {
       if (we.getCause() instanceof GeneralSecurityException) {
         throw (GeneralSecurityException) we.getCause();
@@ -315,7 +315,7 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
             throws IOException, GeneralSecurityException {
           return provider.reencryptEncryptedKey(ekv);
         }
-      }, nextIdx());
+      }, nextIdx(), true);
     } catch (WrapperException we) {
       if (we.getCause() instanceof GeneralSecurityException) {
         throw (GeneralSecurityException) we.getCause();
@@ -335,7 +335,7 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
           provider.reencryptEncryptedKeys(ekvs);
           return null;
         }
-      }, nextIdx());
+      }, nextIdx(), true);
     } catch (WrapperException we) {
       if (we.getCause() instanceof GeneralSecurityException) {
         throw (GeneralSecurityException) we.getCause();
@@ -351,7 +351,7 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
       public KeyVersion call(KMSClientProvider provider) throws IOException {
         return provider.getKeyVersion(versionName);
       }
-    }, nextIdx());
+    }, nextIdx(), true);
   }
 
   @Override
@@ -361,7 +361,7 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
       public List<String> call(KMSClientProvider provider) throws IOException {
         return provider.getKeys();
       }
-    }, nextIdx());
+    }, nextIdx(), true);
   }
 
   @Override
@@ -371,7 +371,7 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
       public Metadata[] call(KMSClientProvider provider) throws IOException {
         return provider.getKeysMetadata(names);
       }
-    }, nextIdx());
+    }, nextIdx(), true);
   }
 
   @Override
@@ -382,7 +382,7 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
           throws IOException {
         return provider.getKeyVersions(name);
       }
-    }, nextIdx());
+    }, nextIdx(), true);
   }
 
   @Override
@@ -392,8 +392,9 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
       public KeyVersion call(KMSClientProvider provider) throws IOException {
         return provider.getCurrentKey(name);
       }
-    }, nextIdx());
+    }, nextIdx(), true);
   }
+
   @Override
   public Metadata getMetadata(final String name) throws IOException {
     return doOp(new ProviderCallable<Metadata>() {
@@ -401,7 +402,7 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
       public Metadata call(KMSClientProvider provider) throws IOException {
         return provider.getMetadata(name);
       }
-    }, nextIdx());
+    }, nextIdx(), true);
   }
 
   @Override
@@ -412,7 +413,7 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
       public KeyVersion call(KMSClientProvider provider) throws IOException {
         return provider.createKey(name, material, options);
       }
-    }, nextIdx());
+    }, nextIdx(), false);
   }
 
   @Override
@@ -425,7 +426,7 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
             NoSuchAlgorithmException {
           return provider.createKey(name, options);
         }
-      }, nextIdx());
+      }, nextIdx(), false);
     } catch (WrapperException e) {
       if (e.getCause() instanceof GeneralSecurityException) {
         throw (NoSuchAlgorithmException) e.getCause();
@@ -442,7 +443,7 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
         provider.deleteKey(name);
         return null;
       }
-    }, nextIdx());
+    }, nextIdx(), false);
   }
 
   @Override
@@ -453,7 +454,7 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
       public KeyVersion call(KMSClientProvider provider) throws IOException {
         return provider.rollNewVersion(name, material);
       }
-    }, nextIdx());
+    }, nextIdx(), false);
     invalidateCache(name);
     return newVersion;
   }
@@ -468,7 +469,7 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
             NoSuchAlgorithmException {
           return provider.rollNewVersion(name);
         }
-      }, nextIdx());
+      }, nextIdx(), false);
       invalidateCache(name);
       return newVersion;
     } catch (WrapperException e) {
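
The new isIdempotent flag becomes the fourth argument of
RetryPolicy.shouldRetry(). A minimal sketch of how the underlying policy
reacts to it (the fallback policy and retry count here are assumptions
chosen to mirror the provider's configuration, not the provider's exact
construction code):

import java.io.IOException;
import java.net.SocketTimeoutException;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;

public class IdempotencyDemo {
  public static void main(String[] args) throws Exception {
    // Fail over on network errors, up to a bounded number of attempts.
    RetryPolicy policy = RetryPolicies.failoverOnNetworkException(
        RetryPolicies.TRY_ONCE_THEN_FAIL, 10);
    IOException ioe = new SocketTimeoutException("kms timeout");
    // Idempotent call (e.g. getKeys): eligible for failover and retry.
    System.out.println(policy.shouldRetry(ioe, 0, 0, true).action);
    // Non-idempotent call (e.g. createKey): not blindly retried.
    System.out.println(policy.shouldRetry(ioe, 0, 0, false).action);
  }
}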

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a630a27c/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
index 4e7aed9..058db92 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
@@ -29,10 +29,13 @@ import static org.mockito.Mockito.verify;
 import java.io.IOException;
 import java.net.ConnectException;
 import java.net.NoRouteToHostException;
+import java.net.SocketTimeoutException;
 import java.net.URI;
 import java.net.UnknownHostException;
 import java.security.GeneralSecurityException;
 import java.security.NoSuchAlgorithmException;
+import java.util.Arrays;
+import java.util.List;
 
 import javax.net.ssl.SSLHandshakeException;
 
@@ -355,24 +358,27 @@ public class TestLoadBalancingKMSClientProvider {
   }
 
   /**
-   * Tests whether retryPolicy fails immediately, after trying each provider
-   * once, on encountering IOException which is not SocketException.
+   * Tests whether retryPolicy fails immediately on non-idempotent operations,
+   * after trying each provider once, on encountering an IOException
+   * which is not a SocketException.
    * @throws Exception
    */
   @Test
-  public void testClientRetriesWithIOException() throws Exception {
+  public void testClientRetriesNonIdempotentOpWithIOExceptionFailsImmediately()
+      throws Exception {
     Configuration conf = new Configuration();
+    final String keyName = "test";
     // Setting total failover attempts to 10.
     conf.setInt(
         CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, 10);
     KMSClientProvider p1 = mock(KMSClientProvider.class);
-    when(p1.getMetadata(Mockito.anyString()))
+    when(p1.createKey(Mockito.anyString(), Mockito.any(Options.class)))
         .thenThrow(new IOException("p1"));
     KMSClientProvider p2 = mock(KMSClientProvider.class);
-    when(p2.getMetadata(Mockito.anyString()))
+    when(p2.createKey(Mockito.anyString(), Mockito.any(Options.class)))
         .thenThrow(new IOException("p2"));
     KMSClientProvider p3 = mock(KMSClientProvider.class);
-    when(p3.getMetadata(Mockito.anyString()))
+    when(p3.createKey(Mockito.anyString(), Mockito.any(Options.class)))
         .thenThrow(new IOException("p3"));
 
     when(p1.getKMSUrl()).thenReturn("p1");
@@ -381,17 +387,61 @@ public class TestLoadBalancingKMSClientProvider {
     LoadBalancingKMSClientProvider kp = new LoadBalancingKMSClientProvider(
         new KMSClientProvider[] {p1, p2, p3}, 0, conf);
     try {
-      kp.getMetadata("test3");
+      kp.createKey(keyName, new Options(conf));
       fail("Should fail since all providers threw an IOException");
     } catch (Exception e) {
       assertTrue(e instanceof IOException);
     }
     verify(kp.getProviders()[0], Mockito.times(1))
-        .getMetadata(Mockito.eq("test3"));
+        .createKey(Mockito.eq(keyName), Mockito.any(Options.class));
+    verify(kp.getProviders()[1], Mockito.times(1))
+        .createKey(Mockito.eq(keyName), Mockito.any(Options.class));
+    verify(kp.getProviders()[2], Mockito.times(1))
+        .createKey(Mockito.eq(keyName), Mockito.any(Options.class));
+  }
+
+  /**
+   * Tests whether retryPolicy retries on idempotent operations
+   * when encountering an IOException.
+   * @throws Exception
+   */
+  @Test
+  public void testClientRetriesIdempotentOpWithIOExceptionSucceedsSecondTime()
+      throws Exception {
+    Configuration conf = new Configuration();
+    final String keyName = "test";
+    final KeyProvider.KeyVersion keyVersion
+        = new KMSClientProvider.KMSKeyVersion(keyName, "v1",
+        new byte[0]);
+    // Setting total failover attempts to 10.
+    conf.setInt(
+        CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, 10);
+    KMSClientProvider p1 = mock(KMSClientProvider.class);
+    when(p1.getCurrentKey(Mockito.anyString()))
+        .thenThrow(new IOException("p1"))
+        .thenReturn(keyVersion);
+    KMSClientProvider p2 = mock(KMSClientProvider.class);
+    when(p2.getCurrentKey(Mockito.anyString()))
+        .thenThrow(new IOException("p2"));
+    KMSClientProvider p3 = mock(KMSClientProvider.class);
+    when(p3.getCurrentKey(Mockito.anyString()))
+        .thenThrow(new IOException("p3"));
+
+    when(p1.getKMSUrl()).thenReturn("p1");
+    when(p2.getKMSUrl()).thenReturn("p2");
+    when(p3.getKMSUrl()).thenReturn("p3");
+    LoadBalancingKMSClientProvider kp = new LoadBalancingKMSClientProvider(
+        new KMSClientProvider[] {p1, p2, p3}, 0, conf);
+
+    KeyProvider.KeyVersion result = kp.getCurrentKey(keyName);
+
+    assertEquals(keyVersion, result);
+    verify(kp.getProviders()[0], Mockito.times(2))
+        .getCurrentKey(Mockito.eq(keyName));
     verify(kp.getProviders()[1], Mockito.times(1))
-        .getMetadata(Mockito.eq("test3"));
+        .getCurrentKey(Mockito.eq(keyName));
     verify(kp.getProviders()[2], Mockito.times(1))
-        .getMetadata(Mockito.eq("test3"));
+        .getCurrentKey(Mockito.eq(keyName));
   }
 
   /**
@@ -717,4 +767,115 @@ public class TestLoadBalancingKMSClientProvider {
     verify(p2, Mockito.times(1)).createKey(Mockito.eq(keyName),
         Mockito.any(Options.class));
   }
+
+  /**
+   * Tests that if an idempotent operation succeeds the second time after
+   * a SocketTimeoutException, then the operation is successful.
+   * @throws Exception
+   */
+  @Test
+  public void testClientRetriesIdempotentOpWithSocketTimeoutExceptionSucceeds()
+      throws Exception {
+    Configuration conf = new Configuration();
+    conf.setInt(
+        CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, 3);
+    final List<String> keys = Arrays.asList("testKey");
+    KMSClientProvider p1 = mock(KMSClientProvider.class);
+    when(p1.getKeys())
+        .thenThrow(new SocketTimeoutException("p1"))
+        .thenReturn(keys);
+    KMSClientProvider p2 = mock(KMSClientProvider.class);
+    when(p2.getKeys()).thenThrow(new SocketTimeoutException("p2"));
+
+    when(p1.getKMSUrl()).thenReturn("p1");
+    when(p2.getKMSUrl()).thenReturn("p2");
+
+    LoadBalancingKMSClientProvider kp = new LoadBalancingKMSClientProvider(
+        new KMSClientProvider[] {p1, p2}, 0, conf);
+
+    List<String> result = kp.getKeys();
+    assertEquals(keys, result);
+    verify(p1, Mockito.times(2)).getKeys();
+    verify(p2, Mockito.times(1)).getKeys();
+  }
+
+  /**
+   * Tests that if an idempotent operation fails at every attempt
+   * with SocketTimeoutException, then SocketTimeoutException is thrown.
+   * @throws Exception
+   */
+  @Test
+  public void testClientRetriesIdempotentOpWithSocketTimeoutExceptionFails()
+      throws Exception {
+    Configuration conf = new Configuration();
+    conf.setInt(
+        CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, 2);
+    final String keyName = "test";
+    final String exceptionMessage = "p1 exception message";
+    KMSClientProvider p1 = mock(KMSClientProvider.class);
+    Exception originalEx = new SocketTimeoutException(exceptionMessage);
+    when(p1.getKeyVersions(Mockito.anyString()))
+        .thenThrow(originalEx);
+    KMSClientProvider p2 = mock(KMSClientProvider.class);
+    when(p2.getKeyVersions(Mockito.anyString()))
+        .thenThrow(new SocketTimeoutException("p2 exception message"));
+
+    when(p1.getKMSUrl()).thenReturn("p1");
+    when(p2.getKMSUrl()).thenReturn("p2");
+
+    LoadBalancingKMSClientProvider kp = new LoadBalancingKMSClientProvider(
+        new KMSClientProvider[] {p1, p2}, 0, conf);
+
+    Exception interceptedEx = intercept(SocketTimeoutException.class,
+        "SocketTimeoutException: " + exceptionMessage,
+        () -> kp.getKeyVersions(keyName));
+    assertEquals(originalEx, interceptedEx);
+
+    verify(p1, Mockito.times(2))
+        .getKeyVersions(Mockito.eq(keyName));
+    verify(p2, Mockito.times(1))
+        .getKeyVersions(Mockito.eq(keyName));
+  }
+
+  /**
+   * Tests whether retryPolicy fails immediately on non-idempotent operations,
+   * after trying each provider once, on encountering SocketTimeoutException.
+   * @throws Exception
+   */
+  @Test
+  public void testClientRetriesNonIdempotentOpWithSocketTimeoutExceptionFails()
+      throws Exception {
+    Configuration conf = new Configuration();
+    final String keyName = "test";
+    // Setting total failover attempts to 10.
+    conf.setInt(
+        CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, 10);
+    KMSClientProvider p1 = mock(KMSClientProvider.class);
+    when(p1.createKey(Mockito.anyString(), Mockito.any(Options.class)))
+        .thenThrow(new SocketTimeoutException("p1"));
+    KMSClientProvider p2 = mock(KMSClientProvider.class);
+    when(p2.createKey(Mockito.anyString(), Mockito.any(Options.class)))
+        .thenThrow(new SocketTimeoutException("p2"));
+    KMSClientProvider p3 = mock(KMSClientProvider.class);
+    when(p3.createKey(Mockito.anyString(), Mockito.any(Options.class)))
+        .thenThrow(new SocketTimeoutException("p3"));
+
+    when(p1.getKMSUrl()).thenReturn("p1");
+    when(p2.getKMSUrl()).thenReturn("p2");
+    when(p3.getKMSUrl()).thenReturn("p3");
+    LoadBalancingKMSClientProvider kp = new LoadBalancingKMSClientProvider(
+        new KMSClientProvider[] {p1, p2, p3}, 0, conf);
+    try {
+      kp.createKey(keyName, new Options(conf));
+      fail("Should fail since all providers threw a SocketTimeoutException");
+    } catch (Exception e) {
+      assertTrue(e instanceof SocketTimeoutException);
+    }
+    verify(kp.getProviders()[0], Mockito.times(1))
+        .createKey(Mockito.eq(keyName), Mockito.any(Options.class));
+    verify(kp.getProviders()[1], Mockito.times(1))
+        .createKey(Mockito.eq(keyName), Mockito.any(Options.class));
+    verify(kp.getProviders()[2], Mockito.times(1))
+        .createKey(Mockito.eq(keyName), Mockito.any(Options.class));
+  }
 }
\ No newline at end of file




[09/14] hadoop git commit: HDFS-13819. TestDirectoryScanner#testDirectoryScannerInFederatedCluster is flaky

Posted by ar...@apache.org.
HDFS-13819. TestDirectoryScanner#testDirectoryScannerInFederatedCluster is flaky

Change-Id: I1cea6e67fcec72702ad202775dee3373261ac5cd


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c15853f8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c15853f8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c15853f8

Branch: refs/heads/branch-3.1
Commit: c15853f87ae1ce4a474494f88407786b3a644068
Parents: 975d606
Author: Daniel Templeton <te...@apache.org>
Authored: Tue Aug 14 17:03:10 2018 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Mon Aug 20 14:54:58 2018 -0700

----------------------------------------------------------------------
 .../server/datanode/TestDirectoryScanner.java   | 42 +++++++++++++++-----
 1 file changed, 32 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c15853f8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index f792523..893fe20 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -40,6 +40,7 @@ import java.util.Random;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.io.FileUtils;
@@ -312,18 +313,29 @@ public class TestDirectoryScanner {
     return id;
   }
 
-  private void scan(long totalBlocks, int diffsize, long missingMetaFile, long missingBlockFile,
-      long missingMemoryBlocks, long mismatchBlocks) throws IOException {
+  private void scan(long totalBlocks, int diffsize, long missingMetaFile,
+      long missingBlockFile, long missingMemoryBlocks, long mismatchBlocks)
+      throws IOException, InterruptedException, TimeoutException {
     scan(totalBlocks, diffsize, missingMetaFile, missingBlockFile,
          missingMemoryBlocks, mismatchBlocks, 0);
   }
 
   private void scan(long totalBlocks, int diffsize, long missingMetaFile,
       long missingBlockFile, long missingMemoryBlocks, long mismatchBlocks,
-      long duplicateBlocks) throws IOException {
+      long duplicateBlocks)
+      throws IOException, InterruptedException, TimeoutException {
     scanner.reconcile();
-    verifyStats(totalBlocks, diffsize, missingMetaFile, missingBlockFile,
-        missingMemoryBlocks, mismatchBlocks, duplicateBlocks);
+
+    GenericTestUtils.waitFor(() -> {
+      try {
+        verifyStats(totalBlocks, diffsize, missingMetaFile, missingBlockFile,
+            missingMemoryBlocks, mismatchBlocks, duplicateBlocks);
+      } catch (AssertionError ex) {
+        return false;
+      }
+
+      return true;
+    }, 50, 2000);
   }
 
   private void verifyStats(long totalBlocks, int diffsize, long missingMetaFile,
@@ -785,7 +797,8 @@ public class TestDirectoryScanner {
     }
   }
 
-  private float runThrottleTest(int blocks) throws IOException {
+  private float runThrottleTest(int blocks)
+      throws IOException, InterruptedException, TimeoutException {
     scanner.setRetainDiffs(true);
     scan(blocks, 0, 0, 0, 0, 0);
     scanner.shutdown();
@@ -1069,10 +1082,19 @@ public class TestDirectoryScanner {
       scanner.setRetainDiffs(true);
       scanner.reconcile();
       //Check blocks in corresponding BP
-      bpid = cluster.getNamesystem(1).getBlockPoolId();
-      verifyStats(bp1Files, 0, 0, 0, 0, 0, 0);
-      bpid = cluster.getNamesystem(3).getBlockPoolId();
-      verifyStats(bp2Files, 0, 0, 0, 0, 0, 0);
+
+      GenericTestUtils.waitFor(() -> {
+        try {
+          bpid = cluster.getNamesystem(1).getBlockPoolId();
+          verifyStats(bp1Files, 0, 0, 0, 0, 0, 0);
+          bpid = cluster.getNamesystem(3).getBlockPoolId();
+          verifyStats(bp2Files, 0, 0, 0, 0, 0, 0);
+        } catch (AssertionError ex) {
+          return false;
+        }
+
+        return true;
+      }, 50, 2000);
     } finally {
       if (scanner != null) {
         scanner.shutdown();
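
The fix applies a general pattern for flaky asynchronous assertions: poll
until they hold or time out. A generic sketch (verifyEventually and its
Runnable parameter are stand-ins; the 50/2000 ms values mirror the patch):

import org.apache.hadoop.test.GenericTestUtils;

public class RetryAssert {
  static void verifyEventually(Runnable assertions) throws Exception {
    // Re-run JUnit-style assertions every 50 ms for up to 2 seconds,
    // treating a transient AssertionError as "not yet".
    GenericTestUtils.waitFor(() -> {
      try {
        assertions.run();
        return true;
      } catch (AssertionError notYet) {
        return false;
      }
    }, 50, 2000);
  }
}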




[11/14] hadoop git commit: HDFS-13732. ECAdmin should print the policy name when an EC policy is set. Contributed by Zsolt Venczel.

Posted by ar...@apache.org.
HDFS-13732. ECAdmin should print the policy name when an EC policy is set. Contributed by Zsolt Venczel.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/04247152
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/04247152
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/04247152

Branch: refs/heads/branch-3.1
Commit: 0424715207cd07debeee5c624973e9db90d36fb6
Parents: 81fbfe5
Author: Xiao Chen <xi...@apache.org>
Authored: Wed Aug 15 13:51:14 2018 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Mon Aug 20 14:57:07 2018 -0700

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java | 12 ++++++------
 .../src/test/resources/testErasureCodingConf.xml        |  6 +++---
 2 files changed, 9 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/04247152/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
index 56706b2..56d453b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
@@ -357,16 +357,16 @@ public class ECAdmin extends Configured implements Tool {
       final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), conf);
       try {
         dfs.setErasureCodingPolicy(p, ecPolicyName);
-        if (ecPolicyName == null){
-          ecPolicyName = "default";
-        }
-        System.out.println("Set " + ecPolicyName + " erasure coding policy on" +
-            " " + path);
+
+        String actualECPolicyName = dfs.getErasureCodingPolicy(p).getName();
+
+        System.out.println("Set " + actualECPolicyName +
+            " erasure coding policy on "+ path);
         RemoteIterator<FileStatus> dirIt = dfs.listStatusIterator(p);
         if (dirIt.hasNext()) {
           System.out.println("Warning: setting erasure coding policy on a " +
               "non-empty directory will not automatically convert existing " +
-              "files to " + ecPolicyName + " erasure coding policy");
+              "files to " + actualECPolicyName + " erasure coding policy");
         }
       } catch (Exception e) {
         System.err.println(AdminHelper.prettifyException(e));
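
The reason for reading the name back: when -setPolicy is invoked without
-policy, the argument passed down is null and the NameNode applies the
cluster default, so only a follow-up query knows the effective name. A
sketch (the directory is hypothetical and the filesystem setup is
omitted):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class EffectivePolicyName {
  static void setAndReport(DistributedFileSystem dfs) throws Exception {
    Path dir = new Path("/ecdir");          // hypothetical directory
    dfs.setErasureCodingPolicy(dir, null);  // null -> cluster default
    // Ask the NameNode which policy actually took effect:
    String name = dfs.getErasureCodingPolicy(dir).getName();
    System.out.println("Set " + name + " erasure coding policy on " + dir);
  }
}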

http://git-wip-us.apache.org/repos/asf/hadoop/blob/04247152/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
index 9070367..b47d50f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
@@ -710,7 +710,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Set default erasure coding policy on /ecdir</expected-output>
+          <expected-output>Set RS-6-3-1024k erasure coding policy on /ecdir</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -728,11 +728,11 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Set default erasure coding policy on /ecdir</expected-output>
+          <expected-output>Set RS-6-3-1024k erasure coding policy on /ecdir</expected-output>
         </comparator>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Warning: setting erasure coding policy on a non-empty directory will not automatically convert existing files to default erasure coding policy</expected-output>
+          <expected-output>Warning: setting erasure coding policy on a non-empty directory will not automatically convert existing files to RS-6-3-1024k erasure coding policy</expected-output>
         </comparator>
       </comparators>
     </test>

