Posted to common-commits@hadoop.apache.org by he...@apache.org on 2021/07/17 13:57:50 UTC

[hadoop] branch trunk updated: HDFS-16067. Support Append API in NNThroughputBenchmark. Contributed by Renukaprasad C.

This is an automated email from the ASF dual-hosted git repository.

hexiaoqiao pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 6ed7670  HDFS-16067. Support Append API in NNThroughputBenchmark. Contributed by Renukaprasad C.
6ed7670 is described below

commit 6ed7670a93da26d475ce1b917abb45ce902c0627
Author: He Xiaoqiao <he...@apache.org>
AuthorDate: Sat Jul 17 21:57:00 2021 +0800

    HDFS-16067. Support Append API in NNThroughputBenchmark. Contributed by Renukaprasad C.
---
 .../src/site/markdown/Benchmarking.md              |  1 +
 .../server/namenode/NNThroughputBenchmark.java     | 52 ++++++++++++++++++++++
 .../server/namenode/TestNNThroughputBenchmark.java | 46 +++++++++++++++++++
 3 files changed, 99 insertions(+)
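
The new op can be driven in-process through NNThroughputBenchmark.runBenchmark, the same entry point the new test below uses. A minimal sketch, assuming a reachable NameNode; the URI, file counts, and class name are illustrative, not part of the commit:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.server.namenode.NNThroughputBenchmark;

    public class AppendBenchSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Illustrative NameNode URI; point this at a real cluster.
        FileSystem.setDefaultUri(conf, "hdfs://localhost:8020");
        // Create the target files first, keeping them for the next op.
        NNThroughputBenchmark.runBenchmark(conf, new String[] {
            "-op", "create", "-keepResults", "-files", "10", "-close"});
        // Append to the existing files, forcing a new block per append.
        NNThroughputBenchmark.runBenchmark(conf, new String[] {
            "-op", "append", "-files", "10", "-useExisting", "-appendNewBlk"});
      }
    }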

diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Benchmarking.md b/hadoop-common-project/hadoop-common/src/site/markdown/Benchmarking.md
index ebd7086..26d5db3 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Benchmarking.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Benchmarking.md
@@ -58,6 +58,7 @@ Following are all the operations supported along with their respective operation
 |`mkdirs` | [`-threads 3`] [`-dirs 10`] [`-dirsPerDir 2`] |
 |`open` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] |
 |`delete` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] |
+|`append` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] [`-appendNewBlk`] |
 |`fileStatus` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] |
 |`rename` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] |
 |`blockReport` | [`-datanodes 10`] [`-reports 30`] [`-blocksPerReport 100`] [`-blocksPerFile 10`] |
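
The -appendNewBlk switch documented above selects which CreateFlag set the op hands to the NameNode. A hedged, self-contained sketch of that mapping, mirroring the executeOp logic in the Java diff below (the class name is illustrative):

    import java.util.EnumSet;

    import org.apache.hadoop.fs.CreateFlag;
    import org.apache.hadoop.io.EnumSetWritable;

    public class AppendFlagSketch {
      /**
       * NEW_BLOCK asks the NameNode to start the appended data in a fresh
       * block; plain APPEND reuses the last partial block of the file.
       */
      static EnumSetWritable<CreateFlag> flagsFor(boolean appendNewBlk) {
        return appendNewBlk
            ? new EnumSetWritable<>(EnumSet.of(CreateFlag.NEW_BLOCK))
            : new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND));
      }
    }
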
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index e861a34..7f6d572 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -827,6 +827,53 @@ public class NNThroughputBenchmark implements Tool {
   }
 
   /**
+   * Append file statistics.
+   * Measure how many append calls the name-node can handle per second.
+   */
+  class AppendFileStats extends OpenFileStats {
+    // Operation types
+    static final String OP_APPEND_NAME = "append";
+    public static final String APPEND_NEW_BLK = "-appendNewBlk";
+    static final String OP_APPEND_USAGE =
+        "-op " + OP_APPEND_NAME + OP_USAGE_ARGS + " [" + APPEND_NEW_BLK + ']';
+    private boolean appendNewBlk = false;
+
+    AppendFileStats(List<String> args) {
+      super(args);
+    }
+
+    @Override
+    String getOpName() {
+      return OP_APPEND_NAME;
+    }
+
+    @Override
+    void parseArguments(List<String> args) {
+      appendNewBlk = args.contains(APPEND_NEW_BLK);
+      if (this.appendNewBlk) {
+        args.remove(APPEND_NEW_BLK);
+      }
+      super.parseArguments(args);
+    }
+
+    @Override
+    long executeOp(int daemonId, int inputIdx, String ignore)
+        throws IOException {
+      long start = Time.now();
+      String src = fileNames[daemonId][inputIdx];
+      EnumSetWritable<CreateFlag> enumSet = null;
+      if (appendNewBlk) {
+        enumSet = new EnumSetWritable<>(EnumSet.of(CreateFlag.NEW_BLOCK));
+      } else {
+        enumSet = new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND));
+      }
+      clientProto.append(src, "TestClient", enumSet);
+      long end = Time.now();
+      return end - start;
+    }
+  }
+
+  /**
    * List file status statistics.
    * 
    * Measure how many get-file-status calls the name-node can handle per second.
@@ -1434,6 +1481,7 @@ public class NNThroughputBenchmark implements Tool {
         + " | \n\t" + MkdirsStats.OP_MKDIRS_USAGE
         + " | \n\t" + OpenFileStats.OP_OPEN_USAGE
         + " | \n\t" + DeleteFileStats.OP_DELETE_USAGE
+        + " | \n\t" + AppendFileStats.OP_APPEND_USAGE
         + " | \n\t" + FileStatusStats.OP_FILE_STATUS_USAGE
         + " | \n\t" + RenameFileStats.OP_RENAME_USAGE
         + " | \n\t" + BlockReportStats.OP_BLOCK_REPORT_USAGE
@@ -1496,6 +1544,10 @@ public class NNThroughputBenchmark implements Tool {
         opStat = new DeleteFileStats(args);
         ops.add(opStat);
       }
+      if (runAll || AppendFileStats.OP_APPEND_NAME.equals(type)) {
+        opStat = new AppendFileStats(args);
+        ops.add(opStat);
+      }
       if(runAll || FileStatusStats.OP_FILE_STATUS_NAME.equals(type)) {
         opStat = new FileStatusStats(args);
         ops.add(opStat);
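
Each executeOp call above times one ClientProtocol.append round trip with Time.now(). As a rough analogue, here is a minimal sketch through the public FileSystem API; the path is illustrative, and unlike the benchmark this goes through client output-stream setup rather than calling the NameNode RPC directly:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.util.Time;

    public class AppendLatencySketch {
      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        Path file = new Path("/nnbench/file-0"); // illustrative path
        long start = Time.now();
        // Open for append and close immediately; no data is written, so the
        // elapsed time is dominated by the NameNode-side append handling.
        fs.append(file).close();
        System.out.println("append round trip: " + (Time.now() - start) + " ms");
      }
    }
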
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
index ec0d6df..44bf5b7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
@@ -26,8 +26,11 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.util.ExitUtil;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -120,4 +123,47 @@ public class TestNNThroughputBenchmark {
       }
     }
   }
+
+  /**
+   * This test runs {@link NNThroughputBenchmark} against a mini DFS cluster
+   * for append operation.
+   */
+  @Test(timeout = 120000)
+  public void testNNThroughputForAppendOp() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      cluster.waitActive();
+
+      final Configuration benchConf = new HdfsConfiguration();
+      benchConf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16);
+      FileSystem.setDefaultUri(benchConf, cluster.getURI());
+      NNThroughputBenchmark.runBenchmark(benchConf,
+          new String[] {"-op", "create", "-keepResults", "-files", "3",
+              "-close" });
+      FSNamesystem fsNamesystem = cluster.getNamesystem();
+      DirectoryListing listing =
+          fsNamesystem.getListing("/", HdfsFileStatus.EMPTY_NAME, false);
+      HdfsFileStatus[] partialListing = listing.getPartialListing();
+
+      NNThroughputBenchmark.runBenchmark(benchConf,
+          new String[] {"-op", "append", "-files", "3", "-useExisting" });
+      listing = fsNamesystem.getListing("/", HdfsFileStatus.EMPTY_NAME, false);
+      HdfsFileStatus[] partialListingAfter = listing.getPartialListing();
+
+      Assert.assertEquals(partialListing.length, partialListingAfter.length);
+      for (int i = 0; i < partialListing.length; i++) {
+        //Check the modification time after append operation
+        Assert.assertNotEquals(partialListing[i].getModificationTime(),
+            partialListingAfter[i].getModificationTime());
+      }
+
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }
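
The test verifies the appends indirectly: it lists "/" before and after the append run and asserts that every file's modification time changed. The same check can be made per file through the FileSystem API; a hedged sketch with an illustrative path:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class MtimeCheckSketch {
      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        Path file = new Path("/nnbench/file-0"); // illustrative path
        long before = fs.getFileStatus(file).getModificationTime();
        fs.append(file).close(); // an append updates the file's mtime
        long after = fs.getFileStatus(file).getModificationTime();
        if (after == before) {
          throw new IllegalStateException("append did not update mtime");
        }
      }
    }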
