You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by sh...@apache.org on 2013/12/29 22:22:04 UTC
svn commit: r1554071 - in
/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: CHANGES.txt
src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
Author: shv
Date: Sun Dec 29 21:22:04 2013
New Revision: 1554071
URL: http://svn.apache.org/r1554071
Log:
HDFS-5675. Add Mkdirs operation to NNThroughputBenchmark. Contributed by Plamen Jeliazkov.
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1554071&r1=1554070&r2=1554071&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Sun Dec 29 21:22:04 2013
@@ -618,9 +618,6 @@ Release 2.4.0 - UNRELEASED
HDFS-5004. Add additional JMX bean for NameNode status data
(Trevor Lorimer via cos)
- HDFS-5068. Convert NNThroughputBenchmark to a Tool to allow generic options.
- (shv)
-
HDFS-4994. Audit log getContentSummary() calls. (Robert Parker via kihwal)
HDFS-5144. Document time unit to NameNodeMetrics. (Akira Ajisaka via
@@ -866,6 +863,12 @@ Release 2.3.0 - UNRELEASED
HDFS-5662. Can't decommission a DataNode due to file's replication factor
larger than the rest of the cluster size. (brandonli)
+ HDFS-5068. Convert NNThroughputBenchmark to a Tool to allow generic options.
+ (shv)
+
+ HDFS-5675. Add Mkdirs operation to NNThroughputBenchmark.
+ (Plamen Jeliazkov via shv)
+
OPTIMIZATIONS
BUG FIXES
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1554071&r1=1554070&r2=1554071&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Sun Dec 29 21:22:04 2013
@@ -606,6 +606,98 @@ public class NNThroughputBenchmark imple
}
/**
+ * Directory creation statistics.
+ *
+ * Each thread creates the same (+ or -1) number of directories.
+ * Directory names are pre-generated during initialization.
+ */
+ class MkdirsStats extends OperationStatsBase {
+ // Operation types
+ static final String OP_MKDIRS_NAME = "mkdirs";
+ static final String OP_MKDIRS_USAGE = "-op mkdirs [-threads T] [-dirs N] " +
+ "[-dirsPerDir P]";
+
+ // Produces the pre-generated directory names used by every thread.
+ protected FileNameGenerator nameGenerator;
+ // dirPaths[threadIdx][opIdx] -> full path of the directory that op creates.
+ protected String[][] dirPaths;
+
+ MkdirsStats(List<String> args) {
+ super();
+ parseArguments(args);
+ }
+
+ @Override
+ String getOpName() {
+ return OP_MKDIRS_NAME;
+ }
+
+ @Override
+ void parseArguments(List<String> args) {
+ boolean ignoreUnrelatedOptions = verifyOpArgument(args);
+ // Default fan-out per parent directory when -dirsPerDir is not given.
+ int nrDirsPerDir = 2;
+ // NOTE(review): loop starts at index 2 — presumably args 0-1 were consumed
+ // by verifyOpArgument; confirm against OperationStatsBase.
+ for (int i = 2; i < args.size(); i++) { // parse command line
+ if(args.get(i).equals("-dirs")) {
+ if(i+1 == args.size()) printUsage();
+ numOpsRequired = Integer.parseInt(args.get(++i));
+ } else if(args.get(i).equals("-threads")) {
+ if(i+1 == args.size()) printUsage();
+ numThreads = Integer.parseInt(args.get(++i));
+ } else if(args.get(i).equals("-dirsPerDir")) {
+ if(i+1 == args.size()) printUsage();
+ nrDirsPerDir = Integer.parseInt(args.get(++i));
+ } else if(!ignoreUnrelatedOptions)
+ printUsage();
+ }
+ nameGenerator = new FileNameGenerator(getBaseDir(), nrDirsPerDir);
+ }
+
+ @Override
+ void generateInputs(int[] opsPerThread) throws IOException {
+ assert opsPerThread.length == numThreads : "Error opsPerThread.length";
+ // Leave safe mode so the name-node will accept the mkdirs calls.
+ nameNodeProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE,
+ false);
+ LOG.info("Generate " + numOpsRequired + " inputs for " + getOpName());
+ // Pre-compute every directory path up front so name generation is not
+ // included in the timed mkdirs operation.
+ dirPaths = new String[numThreads][];
+ for(int idx=0; idx < numThreads; idx++) {
+ int threadOps = opsPerThread[idx];
+ dirPaths[idx] = new String[threadOps];
+ for(int jdx=0; jdx < threadOps; jdx++)
+ dirPaths[idx][jdx] = nameGenerator.
+ getNextFileName("ThroughputBench");
+ }
+ }
+
+ /**
+ * returns client name
+ */
+ @Override
+ String getExecutionArgument(int daemonId) {
+ return getClientName(daemonId);
+ }
+
+ /**
+ * Do mkdirs operation.
+ */
+ @Override
+ long executeOp(int daemonId, int inputIdx, String clientName)
+ throws IOException {
+ // Time a single mkdirs RPC; the boolean true is presumably createParent
+ // — confirm against ClientProtocol.mkdirs.
+ long start = Time.now();
+ nameNodeProto.mkdirs(dirPaths[daemonId][inputIdx],
+ FsPermission.getDefault(), true);
+ long end = Time.now();
+ return end-start;
+ }
+
+ @Override
+ void printResults() {
+ // Log the benchmark configuration, then the shared timing statistics.
+ LOG.info("--- " + getOpName() + " inputs ---");
+ LOG.info("nrDirs = " + numOpsRequired);
+ LOG.info("nrThreads = " + numThreads);
+ LOG.info("nrDirsPerDir = " + nameGenerator.getFilesPerDirectory());
+ printStats();
+ }
+ }
+
+ /**
* Open file statistics.
*
* Measure how many open calls (getBlockLocations())
@@ -1279,6 +1371,7 @@ public class NNThroughputBenchmark imple
System.err.println("Usage: NNThroughputBenchmark"
+ "\n\t" + OperationStatsBase.OP_ALL_USAGE
+ " | \n\t" + CreateFileStats.OP_CREATE_USAGE
+ + " | \n\t" + MkdirsStats.OP_MKDIRS_USAGE
+ " | \n\t" + OpenFileStats.OP_OPEN_USAGE
+ " | \n\t" + DeleteFileStats.OP_DELETE_USAGE
+ " | \n\t" + FileStatusStats.OP_FILE_STATUS_USAGE
@@ -1328,6 +1421,10 @@ public class NNThroughputBenchmark imple
opStat = new CreateFileStats(args);
ops.add(opStat);
}
+ if(runAll || MkdirsStats.OP_MKDIRS_NAME.equals(type)) {
+ opStat = new MkdirsStats(args);
+ ops.add(opStat);
+ }
if(runAll || OpenFileStats.OP_OPEN_NAME.equals(type)) {
opStat = new OpenFileStats(args);
ops.add(opStat);