You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by ra...@apache.org on 2017/08/27 07:16:09 UTC
[42/50] [abbrv] hadoop git commit: HDFS-11670: [SPS]: Add CLI command
for satisfy storage policy operations. Contributed by Surendra Singh Lilhore.
HDFS-11670: [SPS]: Add CLI command for satisfy storage policy operations. Contributed by Surendra Singh Lilhore.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/67161da7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/67161da7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/67161da7
Branch: refs/heads/HDFS-10285
Commit: 67161da751db4c9231a2cc07dc3591618c41ce5a
Parents: 8cc681d
Author: Uma Maheswara Rao G <um...@intel.com>
Authored: Mon Jun 19 17:16:49 2017 -0700
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Sun Aug 27 11:56:09 2017 +0530
----------------------------------------------------------------------
.../hadoop/hdfs/tools/StoragePolicyAdmin.java | 92 +++++++++++++++++++-
.../src/site/markdown/ArchivalStorage.md | 21 +++++
.../src/site/markdown/HDFSCommands.md | 2 +
.../hdfs/tools/TestStoragePolicyCommands.java | 43 ++++++++-
4 files changed, 156 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/67161da7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
index 9c7d048..30420ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
@@ -29,6 +29,8 @@ import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
+import com.google.common.base.Joiner;
+
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
@@ -230,6 +232,92 @@ public class StoragePolicyAdmin extends Configured implements Tool {
}
}
+ /** Command to schedule blocks to move based on specified policy. */
+ private static class SatisfyStoragePolicyCommand implements
+ AdminHelper.Command {
+ @Override
+ public String getName() {
+ return "-satisfyStoragePolicy";
+ }
+
+ @Override
+ public String getShortUsage() {
+ return "[" + getName() + " -path <path>]\n";
+ }
+
+ @Override
+ public String getLongUsage() {
+ TableListing listing = AdminHelper.getOptionDescriptionListing();
+ listing.addRow("<path>", "The path of the file/directory to satisfy"
+ + " storage policy");
+ return getShortUsage() + "\n" +
+ "Schedule blocks to move based on file/directory policy.\n\n" +
+ listing.toString();
+ }
+
+ @Override
+ public int run(Configuration conf, List<String> args) throws IOException {
+ final String path = StringUtils.popOptionWithArgument("-path", args);
+ if (path == null) {
+ System.err.println("Please specify the path for setting the storage " +
+ "policy.\nUsage: " + getLongUsage());
+ return 1;
+ }
+
+ final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
+ try {
+ dfs.satisfyStoragePolicy(new Path(path));
+ System.out.println("Scheduled blocks to move based on the current"
+ + " storage policy on " + path);
+ } catch (Exception e) {
+ System.err.println(AdminHelper.prettifyException(e));
+ return 2;
+ }
+ return 0;
+ }
+ }
+
+ /** Command to check storage policy satisfier status. */
+ private static class IsSPSRunningCommand implements AdminHelper.Command {
+ @Override
+ public String getName() {
+ return "-isSPSRunning";
+ }
+
+ @Override
+ public String getShortUsage() {
+ return "[" + getName() + "]\n";
+ }
+
+ @Override
+ public String getLongUsage() {
+ return getShortUsage() + "\n" +
+ "Check the status of Storage Policy Statisfier.\n\n";
+ }
+
+ @Override
+ public int run(Configuration conf, List<String> args) throws IOException {
+ if (!args.isEmpty()) {
+ System.err.print("Can't understand arguments: "
+ + Joiner.on(" ").join(args) + "\n");
+ System.err.println("Usage is " + getLongUsage());
+ return 1;
+ }
+ final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
+ try {
+ if(dfs.getClient().isStoragePolicySatisfierRunning()){
+ System.out.println("yes");
+ }else{
+ System.out.println("no");
+ }
+ } catch (Exception e) {
+ System.err.println(AdminHelper.prettifyException(e));
+ return 2;
+ }
+ return 0;
+ }
+ }
+
/* Command to unset the storage policy set for a file/directory */
private static class UnsetStoragePolicyCommand
implements AdminHelper.Command {
@@ -280,6 +368,8 @@ public class StoragePolicyAdmin extends Configured implements Tool {
new ListStoragePoliciesCommand(),
new SetStoragePolicyCommand(),
new GetStoragePolicyCommand(),
- new UnsetStoragePolicyCommand()
+ new UnsetStoragePolicyCommand(),
+ new SatisfyStoragePolicyCommand(),
+ new IsSPSRunningCommand()
};
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/67161da7/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
index 91ad107..f1895fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
@@ -169,5 +169,26 @@ Get the storage policy of a file or a directory.
|:---- |:---- |
| `-path <path>` | The path referring to either a directory or a file. |
+### Satisfy Storage Policy
+
+Schedule blocks to move based on file/directory policy. This command is applicable only to the given path and its immediate children. Sub-directories won't be considered for satisfying the policy.
+
+* Command:
+
+ hdfs storagepolicies -satisfyStoragePolicy -path <path>
+
+* Arguments:
+
+| | |
+|:---- |:---- |
+| `-path <path>` | The path referring to either a directory or a file. |
+
+### SPS Running Status
+
+Check the running status of the Storage Policy Satisfier in the namenode. If it is running, this command returns 'yes'; otherwise it returns 'no'.
+
+* Command:
+
+ hdfs storagepolicies -isSPSRunning
http://git-wip-us.apache.org/repos/asf/hadoop/blob/67161da7/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index 5903a36..f5ad187 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -576,6 +576,8 @@ Usage:
[-setStoragePolicy -path <path> -policy <policy>]
[-getStoragePolicy -path <path>]
[-unsetStoragePolicy -path <path>]
+ [-satisfyStoragePolicy -path <path>]
+ [-isSPSRunning]
[-help <command-name>]
Lists out all/Gets/sets/unsets storage policies. See the [HDFS Storage Policy Documentation](./ArchivalStorage.html) for more information.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/67161da7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
index 149dabb..c86eecd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
@@ -21,6 +21,8 @@ import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -45,7 +47,10 @@ public class TestStoragePolicyCommands {
@Before
public void clusterSetUp() throws IOException {
conf = new HdfsConfiguration();
- cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL).build();
+ StorageType[][] newtypes = new StorageType[][] {
+ {StorageType.ARCHIVE, StorageType.DISK}};
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL)
+ .storageTypes(newtypes).build();
cluster.waitActive();
fs = cluster.getFileSystem();
}
@@ -157,4 +162,40 @@ public class TestStoragePolicyCommands {
DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /fooz", 2,
"File/Directory does not exist: /fooz");
}
+
+ @Test
+ public void testStoragePolicySatisfierCommand() throws Exception {
+ final String file = "/testStoragePolicySatisfierCommand";
+ DFSTestUtil.createFile(fs, new Path(file), SIZE, REPL, 0);
+
+ final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
+ DFSTestUtil.toolRun(admin, "-getStoragePolicy -path " + file, 0,
+ "The storage policy of " + file + " is unspecified");
+
+ DFSTestUtil.toolRun(admin,
+ "-setStoragePolicy -path " + file + " -policy COLD", 0,
+ "Set storage policy COLD on " + file.toString());
+
+ DFSTestUtil.toolRun(admin, "-satisfyStoragePolicy -path " + file, 0,
+ "Scheduled blocks to move based on the current storage policy on "
+ + file.toString());
+
+ DFSTestUtil.waitExpectedStorageType(file, StorageType.ARCHIVE, 1, 30000,
+ fs);
+ }
+
+ @Test
+ public void testIsSPSRunningCommand() throws Exception {
+ final String file = "/testIsSPSRunningCommand";
+ DFSTestUtil.createFile(fs, new Path(file), SIZE, REPL, 0);
+ final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
+ DFSTestUtil.toolRun(admin, "-isSPSRunning", 0, "yes");
+ cluster.getNameNode().reconfigureProperty(
+ DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, "false");
+ cluster.waitActive();
+ DFSTestUtil.toolRun(admin, "-isSPSRunning", 0, "no");
+ // Test with unnecessary args
+ DFSTestUtil.toolRun(admin, "-isSPSRunning status", 1,
+ "Can't understand arguments: ");
+ }
}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org