Posted to common-commits@hadoop.apache.org by ae...@apache.org on 2016/06/23 00:39:44 UTC
[1/5] hadoop git commit: HDFS-10473: Allow only suitable storage
policies to be set on striped files. Contributed by Uma Maheswara Rao G
Repository: hadoop
Updated Branches:
refs/heads/HDFS-1312 4f5beb6e7 -> 7034dcf9b
HDFS-10473: Allow only suitable storage policies to be set on striped files. Contributed by Uma Maheswara Rao G
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/17eae9eb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/17eae9eb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/17eae9eb
Branch: refs/heads/HDFS-1312
Commit: 17eae9ebb30a3b106c4f6ae0c5374a3ab83abd8a
Parents: 4ee3543
Author: Uma Maheswara Rao G <um...@intel.com>
Authored: Wed Jun 22 11:17:43 2016 -0700
Committer: Uma Maheswara Rao G <um...@intel.com>
Committed: Wed Jun 22 11:17:43 2016 -0700
----------------------------------------------------------------------
.../apache/hadoop/hdfs/server/mover/Mover.java | 15 +++-
.../namenode/ErasureCodingPolicyManager.java | 20 +++++
.../hadoop/hdfs/server/namenode/INodeFile.java | 19 ++++-
.../hadoop/hdfs/server/mover/TestMover.java | 32 +++++++
.../server/namenode/TestStripedINodeFile.java | 87 +++++++++++++++++++-
5 files changed, 170 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/17eae9eb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
index b473a4d..cd37b15b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.server.balancer.ExitStatus;
import org.apache.hadoop.hdfs.server.balancer.Matcher;
import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
@@ -386,7 +387,19 @@ public class Mover {
}
LocatedBlock lb = lbs.get(i);
if (lb.isStriped()) {
- types = policy.chooseStorageTypes((short) lb.getLocations().length);
+ if (ErasureCodingPolicyManager
+ .checkStoragePolicySuitableForECStripedMode(policyId)) {
+ types = policy.chooseStorageTypes((short) lb.getLocations().length);
+ } else {
+          // Only a limited set of storage policies (HOT, COLD, ALL_SSD)
+          // is currently supported for EC striped files. The Mover
+          // skips a file's blocks when its policy is not in that set.
+          LOG.warn("The storage policy " + policy.getName()
+              + " is not suitable for striped EC files. "
+              + "Skipping block movement.");
+ return;
+ }
}
final StorageTypeDiff diff = new StorageTypeDiff(types,
lb.getStorageTypes());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/17eae9eb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index eaf63f9..c4bc8de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -53,6 +53,11 @@ public final class ErasureCodingPolicyManager {
private static final ErasureCodingPolicy[] SYS_POLICIES =
new ErasureCodingPolicy[]{SYS_POLICY1, SYS_POLICY2, SYS_POLICY3};
+ // Supported storage policies for striped EC files
+ private static final byte[] SUITABLE_STORAGE_POLICIES_FOR_EC_STRIPED_MODE = new byte[] {
+ HdfsConstants.HOT_STORAGE_POLICY_ID, HdfsConstants.COLD_STORAGE_POLICY_ID,
+ HdfsConstants.ALLSSD_STORAGE_POLICY_ID };
+
/**
* All active policies maintained in NN memory for fast querying,
* identified and sorted by its name.
@@ -121,6 +126,21 @@ public final class ErasureCodingPolicyManager {
}
/**
+   * @return true if the given policy is suitable for striped EC files.
+ */
+ public static boolean checkStoragePolicySuitableForECStripedMode(
+ byte storagePolicyID) {
+ boolean isPolicySuitable = false;
+ for (byte suitablePolicy : SUITABLE_STORAGE_POLICIES_FOR_EC_STRIPED_MODE) {
+ if (storagePolicyID == suitablePolicy) {
+ isPolicySuitable = true;
+ break;
+ }
+ }
+ return isPolicySuitable;
+ }
+
+ /**
* Clear and clean up
*/
public void clear() {
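For reference, here is the check above distilled into a standalone sketch. The numeric policy ids (HOT=7, COLD=2, ALL_SSD=12, ONE_SSD=10) are quoted from BlockStoragePolicySuite for illustration only; the patch itself reads them from HdfsConstants.

public class SuitabilityCheckSketch {
  // Illustrative policy ids; the real constants live in HdfsConstants.
  static final byte HOT = 7, COLD = 2, ALLSSD = 12, ONESSD = 10;
  static final byte[] SUITABLE_FOR_EC_STRIPED = {HOT, COLD, ALLSSD};

  static boolean suitableForEcStriped(byte storagePolicyId) {
    for (byte suitable : SUITABLE_FOR_EC_STRIPED) {
      if (storagePolicyId == suitable) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    System.out.println(suitableForEcStriped(HOT));    // true
    System.out.println(suitableForEcStriped(ONESSD)); // false -> Mover skips
  }
}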
http://git-wip-us.apache.org/repos/asf/hadoop/blob/17eae9eb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 5c10c86..63945a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
@@ -496,9 +497,25 @@ public class INodeFile extends INodeWithAdditionalFields
public byte getStoragePolicyID() {
byte id = getLocalStoragePolicyID();
if (id == BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
- return this.getParent() != null ?
+ id = this.getParent() != null ?
this.getParent().getStoragePolicyID() : id;
}
+
+    // For striped EC files, only suitable storage policies are honored.
+    // The currently supported policies are HOT, COLD and ALL_SSD.
+    // If any other policy was set on the file, it is treated as
+    // BLOCK_STORAGE_POLICY_ID_UNSPECIFIED.
+ if (isStriped() && id != BLOCK_STORAGE_POLICY_ID_UNSPECIFIED
+ && !ErasureCodingPolicyManager
+ .checkStoragePolicySuitableForECStripedMode(id)) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("The effective storage policy id " + id
+            + " is not suitable for the striped EC file " + getName()
+            + "; returning the unspecified storage policy id instead.");
+      }
+      id = HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
+    }
+
return id;
}
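Putting the two hunks together, the resolution order for a striped file becomes: local policy, then parent policy, then the suitability gate. A compact sketch, assuming a striped file that inherits ONE_SSD from its parent directory; the ids and the helper name are stand-ins for HdfsConstants and ErasureCodingPolicyManager.checkStoragePolicySuitableForECStripedMode:

public class StripedPolicyFallbackSketch {
  // Hypothetical ids; the real values live in HdfsConstants.
  static final byte UNSPECIFIED = 0;
  static final byte ONESSD = 10;

  // Stand-in for checkStoragePolicySuitableForECStripedMode(id).
  static boolean suitableForEcStriped(byte id) {
    return id == 7 || id == 2 || id == 12; // HOT, COLD, ALL_SSD
  }

  public static void main(String[] args) {
    byte id = UNSPECIFIED;   // no policy set on the file itself
    boolean striped = true;
    if (id == UNSPECIFIED) {
      id = ONESSD;           // parent fallback, as before the patch
    }
    if (striped && id != UNSPECIFIED && !suitableForEcStriped(id)) {
      id = UNSPECIFIED;      // new behavior: unsuitable policy discarded
    }
    System.out.println(id);  // 0 -> default (HOT) placement applies,
                             // which is what the new test below asserts
  }
}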
http://git-wip-us.apache.org/repos/asf/hadoop/blob/17eae9eb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
index befab80..f382243 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
@@ -526,6 +526,38 @@ public class TestMover {
StripedFileTestUtil.verifyLocatedStripedBlocks(locatedBlocks,
dataBlocks + parityBlocks);
+ // start 5 more datanodes
+ numOfDatanodes += 5;
+ capacities = new long[5][storagesPerDatanode];
+ for (int i = 0; i < 5; i++) {
+ for (int j = 0; j < storagesPerDatanode; j++) {
+ capacities[i][j] = capacity;
+ }
+ }
+ cluster.startDataNodes(conf, 5,
+ new StorageType[][] { { StorageType.SSD, StorageType.DISK },
+ { StorageType.SSD, StorageType.DISK },
+ { StorageType.SSD, StorageType.DISK },
+ { StorageType.SSD, StorageType.DISK },
+ { StorageType.SSD, StorageType.DISK } },
+ true, null, null, null, capacities, null, false, false, false, null);
+ cluster.triggerHeartbeats();
+
+ // move file blocks to ONE_SSD policy
+ client.setStoragePolicy(barDir, "ONE_SSD");
+
+ // run Mover
+ rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", barDir });
+
+ // verify storage types and locations
+      // Block movements should have been skipped because ONE_SSD is not a
+      // supported policy for striped files
+ locatedBlocks = client.getBlockLocations(fooFile, 0, fileLen);
+ for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
+ for (StorageType type : lb.getStorageTypes()) {
+ Assert.assertEquals(StorageType.ARCHIVE, type);
+ }
+ }
}finally{
cluster.shutdown();
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/17eae9eb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
index 0d15467..3703501 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
@@ -27,21 +27,28 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.NameNodeProxies;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-
+import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
@@ -287,4 +294,82 @@ public class TestStripedINodeFile {
}
}
}
+
+  /**
+   * Tests that block placement on creation of an EC striped file ignores
+   * the storage policy when it is not suitable. The supported storage
+   * policies for EC striped mode are HOT, COLD and ALL_SSD; any other
+   * policy set on the file is ignored and the default policy is used
+   * instead.
+   */
+ @Test(timeout = 60000)
+ public void testUnsuitableStoragePoliciesWithECStripedMode()
+ throws Exception {
+ final Configuration conf = new HdfsConfiguration();
+ int defaultStripedBlockSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE
+ * 4;
+ conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultStripedBlockSize);
+ conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
+ conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L);
+ conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
+ false);
+
+ // start 10 datanodes
+ int numOfDatanodes = 10;
+ int storagesPerDatanode = 2;
+ long capacity = 10 * defaultStripedBlockSize;
+ long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
+ for (int i = 0; i < numOfDatanodes; i++) {
+ for (int j = 0; j < storagesPerDatanode; j++) {
+ capacities[i][j] = capacity;
+ }
+ }
+
+ final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+ .numDataNodes(numOfDatanodes).storagesPerDatanode(storagesPerDatanode)
+ .storageTypes(
+ new StorageType[][] { { StorageType.SSD, StorageType.DISK },
+ { StorageType.SSD, StorageType.DISK },
+ { StorageType.SSD, StorageType.DISK },
+ { StorageType.SSD, StorageType.DISK },
+ { StorageType.SSD, StorageType.DISK },
+ { StorageType.DISK, StorageType.SSD },
+ { StorageType.DISK, StorageType.SSD },
+ { StorageType.DISK, StorageType.SSD },
+ { StorageType.DISK, StorageType.SSD },
+ { StorageType.DISK, StorageType.SSD } })
+ .storageCapacities(capacities).build();
+
+ try {
+ cluster.waitActive();
+
+ // set "/foo" directory with ONE_SSD storage policy.
+ ClientProtocol client = NameNodeProxies.createProxy(conf,
+ cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
+ String fooDir = "/foo";
+ client.mkdirs(fooDir, new FsPermission((short) 777), true);
+ client.setStoragePolicy(fooDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
+ // set an EC policy on "/foo" directory
+ client.setErasureCodingPolicy(fooDir, null);
+
+ // write file to fooDir
+ final String barFile = "/foo/bar";
+ long fileLen = 20 * defaultStripedBlockSize;
+ DFSTestUtil.createFile(cluster.getFileSystem(), new Path(barFile),
+ fileLen, (short) 3, 0);
+
+ // verify storage types and locations
+ LocatedBlocks locatedBlocks = client.getBlockLocations(barFile, 0,
+ fileLen);
+ for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
+ for (StorageType type : lb.getStorageTypes()) {
+ Assert.assertEquals(StorageType.DISK, type);
+ }
+ }
+
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
}
[3/5] hadoop git commit: HDFS-10551.
o.a.h.h.s.diskbalancer.command.Command does not actually verify options as
expected. Contributed by Anu Engineer.
Posted by ae...@apache.org.
HDFS-10551. o.a.h.h.s.diskbalancer.command.Command does not actually verify options as expected. Contributed by Anu Engineer.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2903282d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2903282d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2903282d
Branch: refs/heads/HDFS-1312
Commit: 2903282dbf40b3dd7e6029a45839717eea0f06f9
Parents: 4f5beb6
Author: Anu Engineer <ae...@apache.org>
Authored: Wed Jun 22 17:29:34 2016 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Wed Jun 22 17:29:34 2016 -0700
----------------------------------------------------------------------
.../server/diskbalancer/command/Command.java | 9 +-
.../diskbalancer/command/ExecuteCommand.java | 1 -
.../diskbalancer/command/HelpCommand.java | 1 +
.../diskbalancer/command/PlanCommand.java | 1 +
.../apache/hadoop/hdfs/tools/DiskBalancer.java | 14 ++-
.../src/main/resources/hdfs-default.xml | 40 ++++++++
.../command/TestDiskBalancerCommand.java | 100 ++++++++++++++++---
7 files changed, 141 insertions(+), 25 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2903282d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
index 3ea1b03..de77365 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
@@ -82,8 +82,6 @@ public abstract class Command extends Configured {
public Command(Configuration conf) {
super(conf);
// These arguments are valid for all commands.
- addValidCommandParameters(DiskBalancer.HELP, "Help for this command");
- addValidCommandParameters("arg", "");
topNodes = 0;
}
@@ -248,12 +246,13 @@ public abstract class Command extends Configured {
Iterator<Option> iter = cmd.iterator();
while (iter.hasNext()) {
Option opt = iter.next();
- if (!validArgs.containsKey(opt.getArgName())) {
+
+ if (!validArgs.containsKey(opt.getLongOpt())) {
String errMessage = String
.format("%nInvalid argument found for command %s : %s%n",
- commandName, opt.getArgName());
+ commandName, opt.getLongOpt());
StringBuilder validArguments = new StringBuilder();
- validArguments.append("Valid arguments are : %n");
+ validArguments.append(String.format("Valid arguments are : %n"));
for (Map.Entry<String, String> args : validArgs.entrySet()) {
String key = args.getKey();
String desc = args.getValue();
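The switch from getArgName() to getLongOpt() is the heart of the fix: with options built through the commons-cli OptionBuilder (as in the DiskBalancer.java hunk below), the argument display name defaults to "arg", so validating against getArgName() effectively accepted only the literal "arg". A small sketch, assuming commons-cli 1.2 semantics:

import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;

public class LongOptVsArgName {
  public static void main(String[] args) {
    Option top = OptionBuilder.withLongOpt("top")
        .hasArg()
        .withDescription("number of nodes to list")
        .create();
    // getArgName() is only the display name of the option's argument and
    // defaults to "arg"; getLongOpt() is the option's actual long name.
    System.out.println(top.getArgName()); // "arg"
    System.out.println(top.getLongOpt()); // "top"
  }
}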
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2903282d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
index 5fd1f0a..91bce37 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
@@ -47,7 +47,6 @@ public class ExecuteCommand extends Command {
public ExecuteCommand(Configuration conf) {
super(conf);
addValidCommandParameters(DiskBalancer.EXECUTE, "Executes a given plan.");
- addValidCommandParameters(DiskBalancer.NODE, "Name of the target node.");
}
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2903282d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/HelpCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/HelpCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/HelpCommand.java
index 205df3d..3c2fd0c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/HelpCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/HelpCommand.java
@@ -37,6 +37,7 @@ public class HelpCommand extends Command {
*/
public HelpCommand(Configuration conf) {
super(conf);
+ addValidCommandParameters(DiskBalancer.HELP, "Help Command");
}
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2903282d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
index 20b4c6f..54a63ec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
@@ -74,6 +74,7 @@ public class PlanCommand extends Command {
"between 2 disks");
addValidCommandParameters(DiskBalancer.VERBOSE, "Run plan command in " +
"verbose mode.");
+ addValidCommandParameters(DiskBalancer.PLAN, "Plan Command");
}
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2903282d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java
index 612aa2c..70912d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java
@@ -399,13 +399,19 @@ public class DiskBalancer extends Configured implements Tool {
getReportOptions().addOption(report);
opt.addOption(report);
- Option top = new Option(TOP, true,
- "specify the number of nodes to be listed which has data imbalance.");
+ Option top = OptionBuilder.withLongOpt(TOP)
+ .hasArg()
+ .withDescription("specify the number of nodes to be listed which has" +
+ " data imbalance.")
+ .create();
getReportOptions().addOption(top);
opt.addOption(top);
- Option node = new Option(NODE, true,
- "Datanode address, it can be DataNodeID, IP or hostname.");
+ Option node = OptionBuilder.withLongOpt(NODE)
+ .hasArg()
+ .withDescription("Datanode address, " +
+ "it can be DataNodeID, IP or hostname.")
+ .create();
getReportOptions().addOption(node);
opt.addOption(node);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2903282d/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index fc2f942..856e6b4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4086,4 +4086,44 @@
Truststore password for HTTPS SSL configuration
</description>
</property>
+
+<!--Disk balancer properties-->
+ <property>
+ <name>dfs.disk.balancer.max.disk.throughputInMBperSec</name>
+ <value>10</value>
+    <description>Maximum disk bandwidth used by the disk balancer
+      while reading from a source disk. The unit is MB/sec.
+    </description>
+ </property>
+
+ <property>
+ <name>dfs.disk.balancer.block.tolerance.percent</name>
+ <value>10</value>
+ <description>
+      While a disk balancer copy operation is in progress, the datanode
+      remains active, so it may not be possible to move exactly the
+      specified amount of data. This tolerance defines the percentage
+      deviation that still counts as a good-enough move.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.disk.balancer.max.disk.errors</name>
+ <value>5</value>
+ <description>
+      During a block move from a source to a destination disk, various
+      errors may be encountered. This defines how many errors can be
+      tolerated before a move between two disks (i.e., a step) is
+      declared failed.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.disk.balancer.enabled</name>
+ <value>false</value>
+ <description>
+      This enables the disk balancer feature on a cluster. By default, the
+      disk balancer is disabled.
+ </description>
+ </property>
</configuration>
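For illustration, these keys can be read through the standard Configuration API; the key strings below are copied from the entries above, while the matching DFSConfigKeys constants are omitted here:

import org.apache.hadoop.conf.Configuration;

public class DiskBalancerConfSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    boolean enabled =
        conf.getBoolean("dfs.disk.balancer.enabled", false);
    long maxThroughput =
        conf.getLong("dfs.disk.balancer.max.disk.throughputInMBperSec", 10);
    int tolerance =
        conf.getInt("dfs.disk.balancer.block.tolerance.percent", 10);
    int maxErrors =
        conf.getInt("dfs.disk.balancer.max.disk.errors", 5);
    System.out.println(enabled + " " + maxThroughput + " "
        + tolerance + " " + maxErrors);
  }
}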
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2903282d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
index c1c137d..ceb762f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
@@ -5,9 +5,9 @@
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
- * <p/>
+ * <p>
* http://www.apache.org/licenses/LICENSE-2.0
- * <p/>
+ * <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
@@ -44,16 +44,27 @@ import org.junit.Test;
import com.google.common.collect.Lists;
+import static org.apache.hadoop.hdfs.tools.DiskBalancer.CANCEL;
+import static org.apache.hadoop.hdfs.tools.DiskBalancer.HELP;
+import static org.apache.hadoop.hdfs.tools.DiskBalancer.NODE;
+import static org.apache.hadoop.hdfs.tools.DiskBalancer.PLAN;
+import static org.apache.hadoop.hdfs.tools.DiskBalancer.QUERY;
+
+import org.junit.Rule;
+import org.junit.rules.ExpectedException;
+
/**
* Tests various CLI commands of DiskBalancer.
*/
public class TestDiskBalancerCommand {
+ @Rule
+ public ExpectedException thrown = ExpectedException.none();
private MiniDFSCluster cluster;
private URI clusterJson;
+ private Configuration conf = new HdfsConfiguration();
@Before
public void setUp() throws Exception {
- Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
.storagesPerDatanode(2).build();
@@ -73,7 +84,7 @@ public class TestDiskBalancerCommand {
}
/* test basic report */
- @Test(timeout=60000)
+ @Test(timeout = 60000)
public void testReportSimple() throws Exception {
final String cmdLine = "hdfs diskbalancer -report";
final List<String> outputs = runCommand(cmdLine);
@@ -101,7 +112,7 @@ public class TestDiskBalancerCommand {
}
/* test less than 64 DataNode(s) as total, e.g., -report -top 32 */
- @Test(timeout=60000)
+ @Test(timeout = 60000)
public void testReportLessThanTotal() throws Exception {
final String cmdLine = "hdfs diskbalancer -report -top 32";
final List<String> outputs = runCommand(cmdLine);
@@ -124,7 +135,7 @@ public class TestDiskBalancerCommand {
}
/* test more than 64 DataNode(s) as total, e.g., -report -top 128 */
- @Test(timeout=60000)
+ @Test(timeout = 60000)
public void testReportMoreThanTotal() throws Exception {
final String cmdLine = "hdfs diskbalancer -report -top 128";
final List<String> outputs = runCommand(cmdLine);
@@ -148,7 +159,7 @@ public class TestDiskBalancerCommand {
}
/* test invalid top limit, e.g., -report -top xx */
- @Test(timeout=60000)
+ @Test(timeout = 60000)
public void testReportInvalidTopLimit() throws Exception {
final String cmdLine = "hdfs diskbalancer -report -top xx";
final List<String> outputs = runCommand(cmdLine);
@@ -174,10 +185,10 @@ public class TestDiskBalancerCommand {
containsString("9 volumes with node data density 1.97"))));
}
- @Test(timeout=60000)
+ @Test(timeout = 60000)
public void testReportNode() throws Exception {
final String cmdLine =
- "hdfs diskbalancer -report -node " +
+ "hdfs diskbalancer -report -node " +
"a87654a9-54c7-4693-8dd9-c9c7021dc340";
final List<String> outputs = runCommand(cmdLine);
@@ -249,11 +260,8 @@ public class TestDiskBalancerCommand {
containsString("0.25 free: 490407853993/2000000000000"))));
}
- @Test(timeout=60000)
+ @Test(timeout = 60000)
public void testReadClusterFromJson() throws Exception {
- Configuration conf = new HdfsConfiguration();
- conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
-
ClusterConnector jsonConnector = ConnectorFactory.getCluster(clusterJson,
conf);
DiskBalancerCluster diskBalancerCluster = new DiskBalancerCluster(
@@ -262,10 +270,72 @@ public class TestDiskBalancerCommand {
assertEquals(64, diskBalancerCluster.getNodes().size());
}
- private List<String> runCommand(final String cmdLine) throws Exception {
+ /* test -plan DataNodeID */
+ @Test(timeout = 60000)
+ public void testPlanNode() throws Exception {
+ final String planArg = String.format("-%s %s", PLAN,
+ cluster.getDataNodes().get(0).getDatanodeUuid());
+
+ final String cmdLine = String
+ .format(
+ "hdfs diskbalancer %s", planArg);
+ runCommand(cmdLine);
+ }
+
+  /* Test that illegal arguments are handled correctly. */
+ @Test(timeout = 60000)
+ public void testIllegalArgument() throws Exception {
+ final String planArg = String.format("-%s %s", PLAN,
+ "a87654a9-54c7-4693-8dd9-c9c7021dc340");
+
+ final String cmdLine = String
+ .format(
+ "hdfs diskbalancer %s -report", planArg);
+ // -plan and -report cannot be used together.
+ // tests the validate command line arguments function.
+ thrown.expect(java.lang.IllegalArgumentException.class);
+ runCommand(cmdLine);
+ }
+
+ @Test(timeout = 60000)
+ public void testCancelCommand() throws Exception {
+ final String cancelArg = String.format("-%s %s", CANCEL, "nosuchplan");
+ final String nodeArg = String.format("-%s %s", NODE,
+ cluster.getDataNodes().get(0).getDatanodeUuid());
+    // Host:Port format is expected, so the cancel command will throw.
+ thrown.expect(java.lang.IllegalArgumentException.class);
+ final String cmdLine = String
+ .format(
+ "hdfs diskbalancer %s %s", cancelArg, nodeArg);
+ runCommand(cmdLine);
+ }
+
+  /*
+   Makes an invalid query attempt against a non-existent datanode.
+   */
+ @Test(timeout = 60000)
+ public void testQueryCommand() throws Exception {
+ final String queryArg = String.format("-%s %s", QUERY,
+ cluster.getDataNodes().get(0).getDatanodeUuid());
+ thrown.expect(java.net.UnknownHostException.class);
+ final String cmdLine = String
+ .format(
+ "hdfs diskbalancer %s", queryArg);
+ runCommand(cmdLine);
+ }
+
+ @Test(timeout = 60000)
+ public void testHelpCommand() throws Exception {
+ final String helpArg = String.format("-%s", HELP);
+ final String cmdLine = String
+ .format(
+ "hdfs diskbalancer %s", helpArg);
+ runCommand(cmdLine);
+ }
+
+ private List<String> runCommand(final String cmdLine) throws Exception {
String[] cmds = StringUtils.split(cmdLine, ' ');
- Configuration conf = new HdfsConfiguration();
org.apache.hadoop.hdfs.tools.DiskBalancer db =
new org.apache.hadoop.hdfs.tools.DiskBalancer(conf);
[5/5] hadoop git commit: Merge branch 'trunk' into HDFS-1312
Posted by ae...@apache.org.
Merge branch 'trunk' into HDFS-1312
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7034dcf9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7034dcf9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7034dcf9
Branch: refs/heads/HDFS-1312
Commit: 7034dcf9b81ace4d39a1270bcbad8b583baaa6c1
Parents: edb4197 79a7289
Author: Anu Engineer <ae...@apache.org>
Authored: Wed Jun 22 17:36:52 2016 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Wed Jun 22 17:36:52 2016 -0700
----------------------------------------------------------------------
.../apache/hadoop/hdfs/server/mover/Mover.java | 15 +++-
.../namenode/ErasureCodingPolicyManager.java | 20 +++++
.../hadoop/hdfs/server/namenode/INodeFile.java | 19 ++++-
.../hadoop/hdfs/server/mover/TestMover.java | 32 +++++++
.../server/namenode/TestStripedINodeFile.java | 87 +++++++++++++++++++-
.../mapreduce/task/reduce/MergeManagerImpl.java | 2 +-
.../src/main/resources/mapred-default.xml | 3 +-
.../mapreduce/task/reduce/TestMergeManager.java | 18 ++++
8 files changed, 191 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
[2/5] hadoop git commit: MAPREDUCE-6721.
mapreduce.reduce.shuffle.memory.limit.percent=0.0 should be legal to enforce
shuffle to disk. (Gera Shegalov via ozawa)
Posted by ae...@apache.org.
MAPREDUCE-6721. mapreduce.reduce.shuffle.memory.limit.percent=0.0 should be legal to enforce shuffle to disk. (Gera Shegalov via ozawa)
This closes #102
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/79a72891
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/79a72891
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/79a72891
Branch: refs/heads/HDFS-1312
Commit: 79a7289165510072c46779251ebb010248cb0ce8
Parents: 17eae9e
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Wed Jun 22 17:13:50 2016 -0700
Committer: Tsuyoshi Ozawa <oz...@apache.org>
Committed: Wed Jun 22 17:20:13 2016 -0700
----------------------------------------------------------------------
.../mapreduce/task/reduce/MergeManagerImpl.java | 2 +-
.../src/main/resources/mapred-default.xml | 3 ++-
.../mapreduce/task/reduce/TestMergeManager.java | 18 ++++++++++++++++++
3 files changed, 21 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/79a72891/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
index c99a330..1673ff8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
@@ -178,7 +178,7 @@ public class MergeManagerImpl<K, V> implements MergeManager<K, V> {
final float singleShuffleMemoryLimitPercent =
jobConf.getFloat(MRJobConfig.SHUFFLE_MEMORY_LIMIT_PERCENT,
DEFAULT_SHUFFLE_MEMORY_LIMIT_PERCENT);
- if (singleShuffleMemoryLimitPercent <= 0.0f
+ if (singleShuffleMemoryLimitPercent < 0.0f
|| singleShuffleMemoryLimitPercent > 1.0f) {
throw new IllegalArgumentException("Invalid value for "
+ MRJobConfig.SHUFFLE_MEMORY_LIMIT_PERCENT + ": "
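The reason 0.0 is now meaningful: the per-shuffle in-memory ceiling is a product of the reducer's shuffle buffer and this percentage, so a zero value makes every map output miss the in-memory path. A back-of-envelope sketch, with an illustrative heap size and buffer percent; the names loosely mirror MergeManagerImpl:

public class ZeroShuffleLimitSketch {
  public static void main(String[] args) {
    long reducerHeap = 1L << 30;                // 1 GiB, example value
    float inputBufferPercent = 0.70f;           // shuffle input buffer share
    float singleShuffleLimitPercent = 0.0f;     // newly legal value

    long memoryLimit = (long) (reducerHeap * inputBufferPercent);
    long maxSingleShuffleLimit =
        (long) (memoryLimit * singleShuffleLimitPercent);

    long mapOutputSize = 10;                    // even a tiny output...
    boolean toMemory = mapOutputSize < maxSingleShuffleLimit;
    System.out.println(toMemory);               // false -> shuffled to disk
  }
}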
http://git-wip-us.apache.org/repos/asf/hadoop/blob/79a72891/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index d973bd4..ebc43aa 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -447,7 +447,8 @@
<name>mapreduce.reduce.shuffle.memory.limit.percent</name>
<value>0.25</value>
<description>Expert: Maximum percentage of the in-memory limit that a
- single shuffle can consume</description>
+  single shuffle can consume. The range of valid values is [0.0, 1.0]. If
+  the value is 0.0, map outputs are shuffled directly to disk.</description>
</property>
<property>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/79a72891/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestMergeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestMergeManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestMergeManager.java
index ef860af..1c0d25b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestMergeManager.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestMergeManager.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MROutputFiles;
import org.apache.hadoop.mapred.MapOutputFile;
import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.task.reduce.MergeManagerImpl.CompressAwarePath;
import org.junit.Assert;
import org.junit.Test;
@@ -289,4 +290,21 @@ public class TestMergeManager {
assertTrue("Large in-memory reduce area unusable: " + maxInMemReduce,
maxInMemReduce > Integer.MAX_VALUE);
}
+
+ @Test
+ public void testZeroShuffleMemoryLimitPercent() throws Exception {
+ final JobConf jobConf = new JobConf();
+ jobConf.setFloat(MRJobConfig.SHUFFLE_MEMORY_LIMIT_PERCENT, 0.0f);
+ final MergeManager<Text, Text> mgr =
+ new MergeManagerImpl<>(null, jobConf, mock(LocalFileSystem.class),
+ null, null, null, null, null, null, null, null, null, null,
+ new MROutputFiles());
+ final long mapOutputSize = 10;
+ final int fetcher = 1;
+ final MapOutput<Text, Text> mapOutput = mgr.reserve(
+ TaskAttemptID.forName("attempt_0_1_m_1_1"),
+ mapOutputSize, fetcher);
+ assertEquals("Tiny map outputs should be shuffled to disk", "DISK",
+ mapOutput.getDescription());
+ }
}
[4/5] hadoop git commit: HDFS-10552. DiskBalancer "-query" results in
NPE if no plan for the node. Contributed by Anu Engineer.
Posted by ae...@apache.org.
HDFS-10552. DiskBalancer "-query" results in NPE if no plan for the node. Contributed by Anu Engineer.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/edb41973
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/edb41973
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/edb41973
Branch: refs/heads/HDFS-1312
Commit: edb41973f5fcf9456bddfd902b6cc0a6f42b3dee
Parents: 2903282
Author: Anu Engineer <ae...@apache.org>
Authored: Wed Jun 22 17:35:55 2016 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Wed Jun 22 17:35:55 2016 -0700
----------------------------------------------------------------------
.../hdfs/server/datanode/DiskBalancer.java | 5 +++-
.../diskbalancer/command/QueryCommand.java | 2 +-
.../command/TestDiskBalancerCommand.java | 25 ++++++++++++++++++++
3 files changed, 30 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/edb41973/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
index b31b997..5a1fb9e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
@@ -104,6 +104,7 @@ public class DiskBalancer {
scheduler = Executors.newSingleThreadExecutor();
lock = new ReentrantLock();
workMap = new ConcurrentHashMap<>();
+ this.planID = ""; // to keep protobuf happy.
this.isDiskBalancerEnabled = conf.getBoolean(
DFSConfigKeys.DFS_DISK_BALANCER_ENABLED,
DFSConfigKeys.DFS_DISK_BALANCER_ENABLED_DEFAULT);
@@ -223,7 +224,9 @@ public class DiskBalancer {
lock.lock();
try {
checkDiskBalancerEnabled();
- if ((this.planID == null) || (!this.planID.equals(planID))) {
+ if (this.planID == null ||
+ !this.planID.equals(planID) ||
+ this.planID.isEmpty()) {
LOG.error("Disk Balancer - No such plan. Cancel plan failed. PlanID: " +
planID);
throw new DiskBalancerException("No such plan.",
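In short, the stored plan id now defaults to the empty string (so protobuf never sees a null) and the cancel guard treats an empty stored id like a mismatch. The guard, compressed into a runnable sketch:

public class CancelGuardSketch {
  public static void main(String[] args) {
    String storedPlanId = "";            // default after this patch
    String requestedPlanId = "nosuchplan";
    boolean noSuchPlan = storedPlanId == null
        || !storedPlanId.equals(requestedPlanId)
        || storedPlanId.isEmpty();
    System.out.println(noSuchPlan);      // true -> DiskBalancerException
  }
}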
http://git-wip-us.apache.org/repos/asf/hadoop/blob/edb41973/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
index 6c759e2..fac1e51 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
@@ -63,7 +63,7 @@ public class QueryCommand extends Command {
String nodeAddress = nodeName;
// if the string is not name:port format use the default port.
- if (!nodeName.matches("^.*:\\d$")) {
+ if (!nodeName.matches("[^\\:]+:[0-9]{2,5}")) {
int defaultIPC = NetUtils.createSocketAddr(
getConf().getTrimmed(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,
DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort();
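The old pattern only matched when the port was a single digit, so an ordinary host:port string fell into the append-the-default-port branch and produced a malformed address. Both patterns side by side (the host name is illustrative):

public class PortPatternSketch {
  public static void main(String[] args) {
    String oldPattern = "^.*:\\d$";
    String newPattern = "[^\\:]+:[0-9]{2,5}";
    String node = "datanode1.example.com:9867";
    // matches() anchors the whole string in both cases.
    System.out.println(node.matches(oldPattern)); // false: port has 4 digits
    System.out.println(node.matches(newPattern)); // true
  }
}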
http://git-wip-us.apache.org/repos/asf/hadoop/blob/edb41973/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
index ceb762f..b0821e2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
@@ -351,4 +352,28 @@ public class TestDiskBalancerCommand {
}
return outputs;
}
+
+ /**
+   * Makes sure that we can query the node without having submitted a plan.
+ * @throws Exception
+ */
+ @Test
+ public void testDiskBalancerQueryWithoutSubmit() throws Exception {
+ Configuration conf = new HdfsConfiguration();
+ conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
+ final int numDatanodes = 2;
+ MiniDFSCluster miniDFSCluster = new MiniDFSCluster.Builder(conf)
+ .numDataNodes(numDatanodes).build();
+ try {
+ miniDFSCluster.waitActive();
+ DataNode dataNode = miniDFSCluster.getDataNodes().get(0);
+ final String queryArg = String.format("-query localhost:%d", dataNode
+ .getIpcPort());
+ final String cmdLine = String.format("hdfs diskbalancer %s",
+ queryArg);
+ runCommand(cmdLine);
+ } finally {
+ miniDFSCluster.shutdown();
+ }
+ }
}