You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by um...@apache.org on 2018/08/10 03:49:00 UTC
[07/50] [abbrv] hadoop git commit: HDFS-11670: [SPS]: Add CLI command
for satisfy storage policy operations. Contributed by Surendra Singh Lilhore.
HDFS-11670: [SPS]: Add CLI command for satisfy storage policy operations. Contributed by Surendra Singh Lilhore.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/300848be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/300848be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/300848be
Branch: refs/heads/HDFS-10285
Commit: 300848be46a3514e41b8c44d5d9d64d21b345087
Parents: da2a71d
Author: Uma Maheswara Rao G <um...@intel.com>
Authored: Mon Jun 19 17:16:49 2017 -0700
Committer: Uma Maheswara Rao Gangumalla <um...@apache.org>
Committed: Thu Aug 9 20:47:20 2018 -0700
----------------------------------------------------------------------
.../hadoop/hdfs/tools/StoragePolicyAdmin.java | 93 +++++++++++++++++++-
.../src/site/markdown/ArchivalStorage.md | 21 +++++
.../src/site/markdown/HDFSCommands.md | 2 +
.../hdfs/tools/TestStoragePolicyCommands.java | 43 ++++++++-
4 files changed, 157 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/300848be/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
index aeb10d9..662957c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -32,6 +33,8 @@ import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import java.io.FileNotFoundException;
+import com.google.common.base.Joiner;
+
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
@@ -245,6 +248,92 @@ public class StoragePolicyAdmin extends Configured implements Tool {
}
}
+ /** Command to schedule blocks to move based on specified policy. */
+ private static class SatisfyStoragePolicyCommand implements
+ AdminHelper.Command {
+ @Override
+ public String getName() {
+ return "-satisfyStoragePolicy";
+ }
+
+ @Override
+ public String getShortUsage() {
+ return "[" + getName() + " -path <path>]\n";
+ }
+
+ @Override
+ public String getLongUsage() {
+ TableListing listing = AdminHelper.getOptionDescriptionListing();
+ listing.addRow("<path>", "The path of the file/directory to satisfy"
+ + " storage policy");
+ return getShortUsage() + "\n" +
+ "Schedule blocks to move based on file/directory policy.\n\n" +
+ listing.toString();
+ }
+
+ @Override
+ public int run(Configuration conf, List<String> args) throws IOException {
+ final String path = StringUtils.popOptionWithArgument("-path", args);
+ if (path == null) {
+ System.err.println("Please specify the path for satisfying the storage " +
+ "policy.\nUsage: " + getLongUsage());
+ return 1;
+ }
+
+ final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
+ try {
+ dfs.satisfyStoragePolicy(new Path(path));
+ System.out.println("Scheduled blocks to move based on the current"
+ + " storage policy on " + path);
+ } catch (Exception e) {
+ System.err.println(AdminHelper.prettifyException(e));
+ return 2;
+ }
+ return 0;
+ }
+ }
+
+ /** Command to check storage policy satisfier status. */
+ private static class IsSPSRunningCommand implements AdminHelper.Command {
+ @Override
+ public String getName() {
+ return "-isSPSRunning";
+ }
+
+ @Override
+ public String getShortUsage() {
+ return "[" + getName() + "]\n";
+ }
+
+ @Override
+ public String getLongUsage() {
+ return getShortUsage() + "\n" +
+ "Check the status of Storage Policy Satisfier.\n\n";
+ }
+
+ @Override
+ public int run(Configuration conf, List<String> args) throws IOException {
+ if (!args.isEmpty()) {
+ System.err.print("Can't understand arguments: "
+ + Joiner.on(" ").join(args) + "\n");
+ System.err.println("Usage is " + getLongUsage());
+ return 1;
+ }
+ final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
+ try {
+ if(dfs.getClient().isStoragePolicySatisfierRunning()){
+ System.out.println("yes");
+ }else{
+ System.out.println("no");
+ }
+ } catch (Exception e) {
+ System.err.println(AdminHelper.prettifyException(e));
+ return 2;
+ }
+ return 0;
+ }
+ }
+
/* Command to unset the storage policy set for a file/directory */
private static class UnsetStoragePolicyCommand
implements AdminHelper.Command {
@@ -295,6 +384,8 @@ public class StoragePolicyAdmin extends Configured implements Tool {
new ListStoragePoliciesCommand(),
new SetStoragePolicyCommand(),
new GetStoragePolicyCommand(),
- new UnsetStoragePolicyCommand()
+ new UnsetStoragePolicyCommand(),
+ new SatisfyStoragePolicyCommand(),
+ new IsSPSRunningCommand()
};
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/300848be/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
index 3c49cb1..a56cf8b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
@@ -171,5 +171,26 @@ Get the storage policy of a file or a directory.
|:---- |:---- |
| `-path <path>` | The path referring to either a directory or a file. |
+### Satisfy Storage Policy
+
+Schedule blocks to move based on file/directory policy. This command is applicable only to the given path and its immediate children. Sub-directories won't be considered for satisfying the policy.
+
+* Command:
+
+ hdfs storagepolicies -satisfyStoragePolicy -path <path>
+
+* Arguments:
+
+| | |
+|:---- |:---- |
+| `-path <path>` | The path referring to either a directory or a file. |
+
+### SPS Running Status
+
+Check the running status of the Storage Policy Satisfier in the NameNode. If it is running, it returns 'yes'; otherwise it returns 'no'.
+
+* Command:
+
+ hdfs storagepolicies -isSPSRunning
http://git-wip-us.apache.org/repos/asf/hadoop/blob/300848be/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index 391b71b..8234930 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -615,6 +615,8 @@ Usage:
[-setStoragePolicy -path <path> -policy <policy>]
[-getStoragePolicy -path <path>]
[-unsetStoragePolicy -path <path>]
+ [-satisfyStoragePolicy -path <path>]
+ [-isSPSRunning]
[-help <command-name>]
Lists out all/Gets/sets/unsets storage policies. See the [HDFS Storage Policy Documentation](./ArchivalStorage.html) for more information.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/300848be/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
index f31c739..59f9083 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
@@ -23,6 +23,8 @@ import java.net.URISyntaxException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -46,7 +48,10 @@ public class TestStoragePolicyCommands {
@Before
public void clusterSetUp() throws IOException, URISyntaxException {
conf = new HdfsConfiguration();
- cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL).build();
+ StorageType[][] newtypes = new StorageType[][] {
+ {StorageType.ARCHIVE, StorageType.DISK}};
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL)
+ .storageTypes(newtypes).build();
cluster.waitActive();
fs = cluster.getFileSystem();
}
@@ -158,4 +163,40 @@ public class TestStoragePolicyCommands {
DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /fooz", 2,
"File/Directory does not exist: /fooz");
}
+
+ @Test
+ public void testStoragePolicySatisfierCommand() throws Exception {
+ final String file = "/testStoragePolicySatisfierCommand";
+ DFSTestUtil.createFile(fs, new Path(file), SIZE, REPL, 0);
+
+ final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
+ DFSTestUtil.toolRun(admin, "-getStoragePolicy -path " + file, 0,
+ "The storage policy of " + file + " is unspecified");
+
+ DFSTestUtil.toolRun(admin,
+ "-setStoragePolicy -path " + file + " -policy COLD", 0,
+ "Set storage policy COLD on " + file.toString());
+
+ DFSTestUtil.toolRun(admin, "-satisfyStoragePolicy -path " + file, 0,
+ "Scheduled blocks to move based on the current storage policy on "
+ + file.toString());
+
+ DFSTestUtil.waitExpectedStorageType(file, StorageType.ARCHIVE, 1, 30000,
+ fs);
+ }
+
+ @Test
+ public void testIsSPSRunningCommand() throws Exception {
+ final String file = "/testIsSPSRunningCommand";
+ DFSTestUtil.createFile(fs, new Path(file), SIZE, REPL, 0);
+ final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
+ DFSTestUtil.toolRun(admin, "-isSPSRunning", 0, "yes");
+ cluster.getNameNode().reconfigureProperty(
+ DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, "false");
+ cluster.waitActive();
+ DFSTestUtil.toolRun(admin, "-isSPSRunning", 0, "no");
+ // Test with unnecessary args
+ DFSTestUtil.toolRun(admin, "-isSPSRunning status", 1,
+ "Can't understand arguments: ");
+ }
}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org