You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by as...@apache.org on 2016/03/08 20:06:12 UTC
[11/50] [abbrv] hadoop git commit: HDFS-9534. Add CLI command to
clear storage policy from a path. (Contributed by Xiaobing Zhou)
HDFS-9534. Add CLI command to clear storage policy from a path. (Contributed by Xiaobing Zhou)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/27941a18
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/27941a18
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/27941a18
Branch: refs/heads/yarn-2877
Commit: 27941a1811831e0f2144a2f463d807755cd850b2
Parents: 700b0e4
Author: Arpit Agarwal <ar...@apache.org>
Authored: Wed Mar 2 18:35:28 2016 -0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Wed Mar 2 18:35:28 2016 -0800
----------------------------------------------------------------------
.../java/org/apache/hadoop/fs/FileSystem.java | 10 +
.../org/apache/hadoop/fs/FilterFileSystem.java | 5 +
.../org/apache/hadoop/fs/TestHarFileSystem.java | 2 +
.../java/org/apache/hadoop/hdfs/DFSClient.java | 18 ++
.../hadoop/hdfs/DistributedFileSystem.java | 25 ++
.../hadoop/hdfs/protocol/ClientProtocol.java | 13 +
.../ClientNamenodeProtocolTranslatorPB.java | 12 +
.../src/main/proto/ClientNamenodeProtocol.proto | 9 +
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +
...tNamenodeProtocolServerSideTranslatorPB.java | 17 ++
.../hdfs/server/namenode/FSDirAttrOp.java | 74 +++--
.../hdfs/server/namenode/FSNamesystem.java | 22 ++
.../hdfs/server/namenode/NameNodeRpcServer.java | 7 +
.../hadoop/hdfs/tools/StoragePolicyAdmin.java | 48 +++-
.../src/site/markdown/ArchivalStorage.md | 15 ++
.../hadoop/hdfs/TestApplyingStoragePolicy.java | 268 +++++++++++++++++++
.../hdfs/tools/TestStoragePolicyCommands.java | 63 +++++
17 files changed, 585 insertions(+), 26 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/27941a18/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 8c1d57b..a96ea40 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -2659,6 +2659,16 @@ public abstract class FileSystem extends Configured implements Closeable {
}
/**
+ * Unset the storage policy set for a given file or directory.
+ * @param src file or directory path.
+ * @throws IOException
+ */
+ public void unsetStoragePolicy(Path src) throws IOException {
+ throw new UnsupportedOperationException(getClass().getSimpleName()
+ + " doesn't support unsetStoragePolicy");
+ }
+
+ /**
* Query the effective storage policy ID for the given file or directory.
*
* @param src file or directory path.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/27941a18/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
index 4ee7514..3f9aaa4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
@@ -633,6 +633,11 @@ public class FilterFileSystem extends FileSystem {
}
@Override
+ public void unsetStoragePolicy(Path src) throws IOException {
+ fs.unsetStoragePolicy(src);
+ }
+
+ @Override
public BlockStoragePolicySpi getStoragePolicy(final Path src)
throws IOException {
return fs.getStoragePolicy(src);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/27941a18/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
index 26923a8..a8795cc 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
@@ -210,6 +210,8 @@ public class TestHarFileSystem {
public void setStoragePolicy(Path src, String policyName)
throws IOException;
+ public void unsetStoragePolicy(Path src) throws IOException;
+
public BlockStoragePolicySpi getStoragePolicy(final Path src)
throws IOException;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/27941a18/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index da3f745..0976920 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1431,6 +1431,24 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
}
/**
+ * Unset storage policy set for a given file/directory.
+ * @param src file/directory name
+ */
+ public void unsetStoragePolicy(String src) throws IOException {
+ checkOpen();
+ try (TraceScope ignored = newPathTraceScope("unsetStoragePolicy", src)) {
+ namenode.unsetStoragePolicy(src);
+ } catch (RemoteException e) {
+ throw e.unwrapRemoteException(AccessControlException.class,
+ FileNotFoundException.class,
+ SafeModeException.class,
+ NSQuotaExceededException.class,
+ UnresolvedPathException.class,
+ SnapshotAccessControlException.class);
+ }
+ }
+
+ /**
* @param path file/directory name
* @return Get the storage policy for specified path
*/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/27941a18/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 275c63d..b601d6d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -514,6 +514,31 @@ public class DistributedFileSystem extends FileSystem {
}
@Override
+ public void unsetStoragePolicy(final Path src)
+ throws IOException {
+ statistics.incrementWriteOps(1);
+ Path absF = fixRelativePart(src);
+ new FileSystemLinkResolver<Void>() {
+ @Override
+ public Void doCall(final Path p) throws IOException {
+ dfs.unsetStoragePolicy(getPathName(p));
+ return null;
+ }
+ @Override
+ public Void next(final FileSystem fs, final Path p) throws IOException {
+ if (fs instanceof DistributedFileSystem) {
+ ((DistributedFileSystem) fs).unsetStoragePolicy(p);
+ return null;
+ } else {
+ throw new UnsupportedOperationException(
+ "Cannot perform unsetStoragePolicy on a "
+ + "non-DistributedFileSystem: " + src + " -> " + p);
+ }
+ }
+ }.resolve(this, absF);
+ }
+
+ @Override
public BlockStoragePolicySpi getStoragePolicy(Path path) throws IOException {
statistics.incrementReadOps(1);
Path absF = fixRelativePart(path);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/27941a18/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 3e82eb3..f524d7c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -282,6 +282,19 @@ public interface ClientProtocol {
throws IOException;
/**
+ * Unset the storage policy set for a given file or directory.
+ * @param src Path of an existing file/directory.
+ * @throws SnapshotAccessControlException If access is denied
+ * @throws org.apache.hadoop.fs.UnresolvedLinkException if <code>src</code>
+ * contains a symlink
+ * @throws java.io.FileNotFoundException If file/dir <code>src</code> is not
+ * found
+ * @throws QuotaExceededException If changes violate the quota restriction
+ */
+ @Idempotent
+ void unsetStoragePolicy(String src) throws IOException;
+
+ /**
* Get the storage policy for a file/directory.
* @param path
* Path of an existing file/directory.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/27941a18/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index d9e6472..adcc507 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -134,6 +134,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Recove
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto;
@@ -1467,6 +1468,17 @@ public class ClientNamenodeProtocolTranslatorPB implements
}
@Override
+ public void unsetStoragePolicy(String src) throws IOException {
+ UnsetStoragePolicyRequestProto req = UnsetStoragePolicyRequestProto
+ .newBuilder().setSrc(src).build();
+ try {
+ rpcProxy.unsetStoragePolicy(null, req);
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ }
+
+ @Override
public BlockStoragePolicy getStoragePolicy(String path) throws IOException {
GetStoragePolicyRequestProto request = GetStoragePolicyRequestProto
.newBuilder().setPath(path).build();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/27941a18/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
index 88f63d0..30732ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
@@ -113,6 +113,13 @@ message SetStoragePolicyRequestProto {
message SetStoragePolicyResponseProto { // void response
}
+message UnsetStoragePolicyRequestProto {
+ required string src = 1;
+}
+
+message UnsetStoragePolicyResponseProto {
+}
+
message GetStoragePolicyRequestProto {
required string path = 1;
}
@@ -745,6 +752,8 @@ service ClientNamenodeProtocol {
returns(SetReplicationResponseProto);
rpc setStoragePolicy(SetStoragePolicyRequestProto)
returns(SetStoragePolicyResponseProto);
+ rpc unsetStoragePolicy(UnsetStoragePolicyRequestProto)
+ returns(UnsetStoragePolicyResponseProto);
rpc getStoragePolicy(GetStoragePolicyRequestProto)
returns(GetStoragePolicyResponseProto);
rpc getStoragePolicies(GetStoragePoliciesRequestProto)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/27941a18/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 728342a..d922cd7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1047,6 +1047,9 @@ Release 2.9.0 - UNRELEASED
HDFS-7964. Add support for async edit logging. (Daryn Sharp)
+ HDFS-9534. Add CLI command to clear storage policy from a path.
+ (Xiaobing Zhou via Arpit Agarwal)
+
OPTIMIZATIONS
BUG FIXES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/27941a18/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index 13c0b14..da7c524 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -157,6 +157,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Remove
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto;
@@ -262,6 +264,9 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
GetSnapshottableDirListingResponseProto.newBuilder().build();
static final SetStoragePolicyResponseProto VOID_SET_STORAGE_POLICY_RESPONSE =
SetStoragePolicyResponseProto.newBuilder().build();
+ static final UnsetStoragePolicyResponseProto
+ VOID_UNSET_STORAGE_POLICY_RESPONSE =
+ UnsetStoragePolicyResponseProto.newBuilder().build();
private static final CreateResponseProto VOID_CREATE_RESPONSE =
CreateResponseProto.newBuilder().build();
@@ -1486,6 +1491,18 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
}
@Override
+ public UnsetStoragePolicyResponseProto unsetStoragePolicy(
+ RpcController controller, UnsetStoragePolicyRequestProto request)
+ throws ServiceException {
+ try {
+ server.unsetStoragePolicy(request.getSrc());
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ return VOID_UNSET_STORAGE_POLICY_RESPONSE;
+ }
+
+ @Override
public GetStoragePolicyResponseProto getStoragePolicy(
RpcController controller, GetStoragePolicyRequestProto request)
throws ServiceException {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/27941a18/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index 2cba2cb..06942f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -37,6 +37,8 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.util.EnumCounters;
import org.apache.hadoop.security.AccessControlException;
+import com.google.common.collect.Lists;
+
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Arrays;
@@ -158,13 +160,31 @@ public class FSDirAttrOp {
return isFile;
}
- static HdfsFileStatus setStoragePolicy(
- FSDirectory fsd, BlockManager bm, String src, final String policyName)
+ static HdfsFileStatus unsetStoragePolicy(FSDirectory fsd, BlockManager bm,
+ String src) throws IOException {
+ return setStoragePolicy(fsd, bm, src,
+ HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, "unset");
+ }
+
+ static HdfsFileStatus setStoragePolicy(FSDirectory fsd, BlockManager bm,
+ String src, final String policyName) throws IOException {
+ // get the corresponding policy and make sure the policy name is valid
+ BlockStoragePolicy policy = bm.getStoragePolicy(policyName);
+ if (policy == null) {
+ throw new HadoopIllegalArgumentException(
+ "Cannot find a block policy with the name " + policyName);
+ }
+
+ return setStoragePolicy(fsd, bm, src, policy.getId(), "set");
+ }
+
+ static HdfsFileStatus setStoragePolicy(FSDirectory fsd, BlockManager bm,
+ String src, final byte policyId, final String operation)
throws IOException {
if (!fsd.isStoragePolicyEnabled()) {
- throw new IOException(
- "Failed to set storage policy since "
- + DFS_STORAGE_POLICY_ENABLED_KEY + " is set to false.");
+ throw new IOException(String.format(
+ "Failed to %s storage policy since %s is set to false.", operation,
+ DFS_STORAGE_POLICY_ENABLED_KEY));
}
FSPermissionChecker pc = fsd.getPermissionChecker();
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
@@ -178,14 +198,8 @@ public class FSDirAttrOp {
fsd.checkPathAccess(pc, iip, FsAction.WRITE);
}
- // get the corresponding policy and make sure the policy name is valid
- BlockStoragePolicy policy = bm.getStoragePolicy(policyName);
- if (policy == null) {
- throw new HadoopIllegalArgumentException(
- "Cannot find a block policy with the name " + policyName);
- }
- unprotectedSetStoragePolicy(fsd, bm, iip, policy.getId());
- fsd.getEditLog().logSetStoragePolicy(src, policy.getId());
+ unprotectedSetStoragePolicy(fsd, bm, iip, policyId);
+ fsd.getEditLog().logSetStoragePolicy(src, policyId);
} finally {
fsd.writeUnlock();
}
@@ -446,8 +460,8 @@ public class FSDirAttrOp {
return file.getBlocks();
}
- static void unprotectedSetStoragePolicy(
- FSDirectory fsd, BlockManager bm, INodesInPath iip, byte policyId)
+ static void unprotectedSetStoragePolicy(FSDirectory fsd, BlockManager bm,
+ INodesInPath iip, final byte policyId)
throws IOException {
assert fsd.hasWriteLock();
final INode inode = iip.getLastINode();
@@ -457,10 +471,12 @@ public class FSDirAttrOp {
}
final int snapshotId = iip.getLatestSnapshotId();
if (inode.isFile()) {
- BlockStoragePolicy newPolicy = bm.getStoragePolicy(policyId);
- if (newPolicy.isCopyOnCreateFile()) {
- throw new HadoopIllegalArgumentException(
- "Policy " + newPolicy + " cannot be set after file creation.");
+ if (policyId != HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
+ BlockStoragePolicy newPolicy = bm.getStoragePolicy(policyId);
+ if (newPolicy.isCopyOnCreateFile()) {
+ throw new HadoopIllegalArgumentException("Policy " + newPolicy
+ + " cannot be set after file creation.");
+ }
}
BlockStoragePolicy currentPolicy =
@@ -473,7 +489,8 @@ public class FSDirAttrOp {
}
inode.asFile().setStoragePolicyID(policyId, snapshotId);
} else if (inode.isDirectory()) {
- setDirStoragePolicy(fsd, inode.asDirectory(), policyId, snapshotId);
+ setDirStoragePolicy(fsd, inode.asDirectory(), policyId,
+ snapshotId);
} else {
throw new FileNotFoundException(iip.getPath()
+ " is not a file or directory");
@@ -485,11 +502,18 @@ public class FSDirAttrOp {
int latestSnapshotId) throws IOException {
List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);
XAttr xAttr = BlockStoragePolicySuite.buildXAttr(policyId);
- List<XAttr> newXAttrs = FSDirXAttrOp.setINodeXAttrs(fsd, existingXAttrs,
- Arrays.asList(xAttr),
- EnumSet.of(
- XAttrSetFlag.CREATE,
- XAttrSetFlag.REPLACE));
+ List<XAttr> newXAttrs = null;
+ if (policyId == HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
+ List<XAttr> toRemove = Lists.newArrayList();
+ toRemove.add(xAttr);
+ List<XAttr> removed = Lists.newArrayList();
+ newXAttrs = FSDirXAttrOp.filterINodeXAttrs(existingXAttrs, toRemove,
+ removed);
+ } else {
+ newXAttrs = FSDirXAttrOp.setINodeXAttrs(fsd, existingXAttrs,
+ Arrays.asList(xAttr),
+ EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
+ }
XAttrStorage.updateINodeXAttrs(inode, newXAttrs, latestSnapshotId);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/27941a18/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 31f2b93..907a0ea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1961,6 +1961,28 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
}
/**
+ * Unset the storage policy set for a given file or a directory.
+ *
+ * @param src file/directory path
+ */
+ void unsetStoragePolicy(String src) throws IOException {
+ HdfsFileStatus auditStat;
+ checkOperation(OperationCategory.WRITE);
+ writeLock();
+ try {
+ checkOperation(OperationCategory.WRITE);
+ checkNameNodeSafeMode("Cannot unset storage policy for " + src);
+ auditStat = FSDirAttrOp.unsetStoragePolicy(dir, blockManager, src);
+ } catch (AccessControlException e) {
+ logAuditEvent(false, "unsetStoragePolicy", src);
+ throw e;
+ } finally {
+ writeUnlock();
+ }
+ getEditLog().logSync();
+ logAuditEvent(true, "unsetStoragePolicy", src, null, auditStat);
+ }
+ /**
* Get the storage policy for a file or a directory.
*
* @param src
http://git-wip-us.apache.org/repos/asf/hadoop/blob/27941a18/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index c1646c5..275e210 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -771,6 +771,13 @@ class NameNodeRpcServer implements NamenodeProtocols {
}
@Override
+ public void unsetStoragePolicy(String src)
+ throws IOException {
+ checkNNStartup();
+ namesystem.unsetStoragePolicy(src);
+ }
+
+ @Override
public void setStoragePolicy(String src, String policyName)
throws IOException {
checkNNStartup();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/27941a18/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
index 98c8a6b..24079b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
@@ -224,9 +224,55 @@ public class StoragePolicyAdmin extends Configured implements Tool {
}
}
+ /* Command to unset the storage policy set for a file/directory */
+ private static class UnsetStoragePolicyCommand
+ implements AdminHelper.Command {
+
+ @Override
+ public String getName() {
+ return "-unsetStoragePolicy";
+ }
+
+ @Override
+ public String getShortUsage() {
+ return "[" + getName() + " -path <path>]\n";
+ }
+
+ @Override
+ public String getLongUsage() {
+ TableListing listing = AdminHelper.getOptionDescriptionListing();
+ listing.addRow("<path>", "The path of the file/directory "
+ + "from which the storage policy will be unset.");
+ return getShortUsage() + "\n"
+ + "Unset the storage policy set for a file/directory.\n\n"
+ + listing.toString();
+ }
+
+ @Override
+ public int run(Configuration conf, List<String> args) throws IOException {
+ final String path = StringUtils.popOptionWithArgument("-path", args);
+ if (path == null) {
+ System.err.println("Please specify the path from which "
+ + "the storage policy will be unset.\nUsage: " + getLongUsage());
+ return 1;
+ }
+
+ final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
+ try {
+ dfs.unsetStoragePolicy(new Path(path));
+ System.out.println("Unset storage policy from " + path);
+ } catch (Exception e) {
+ System.err.println(AdminHelper.prettifyException(e));
+ return 2;
+ }
+ return 0;
+ }
+ }
+
private static final AdminHelper.Command[] COMMANDS = {
new ListStoragePoliciesCommand(),
new SetStoragePolicyCommand(),
- new GetStoragePolicyCommand()
+ new GetStoragePolicyCommand(),
+ new UnsetStoragePolicyCommand()
};
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/27941a18/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
index cbfbaa1..803cc91 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
@@ -26,6 +26,7 @@ Archival Storage, SSD & Memory
* [Storage Policy Commands](#Storage_Policy_Commands)
* [List Storage Policies](#List_Storage_Policies)
* [Set Storage Policy](#Set_Storage_Policy)
+ * [Unset Storage Policy](#Unset_Storage_Policy)
* [Get Storage Policy](#Get_Storage_Policy)
Introduction
@@ -150,6 +151,20 @@ Set a storage policy to a file or a directory.
| `-path <path>` | The path referring to either a directory or a file. |
| `-policy <policy>` | The name of the storage policy. |
+### Unset Storage Policy
+
+Unset a storage policy from a file or a directory. After the unset command the storage policy of the nearest ancestor will apply, and if there is no policy on any ancestor then the default storage policy will apply.
+
+* Command:
+
+ hdfs storagepolicies -unsetStoragePolicy -path <path>
+
+* Arguments:
+
+| | |
+|:---- |:---- |
+| `-path <path>` | The path referring to either a directory or a file. |
+
### Get Storage Policy
Get the storage policy of a file or a directory.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/27941a18/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestApplyingStoragePolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestApplyingStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestApplyingStoragePolicy.java
new file mode 100644
index 0000000..200fab6
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestApplyingStoragePolicy.java
@@ -0,0 +1,268 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestApplyingStoragePolicy {
+ private static final short REPL = 1;
+ private static final int SIZE = 128;
+
+ private static Configuration conf;
+ private static MiniDFSCluster cluster;
+ private static DistributedFileSystem fs;
+
+ @Before
+ public void clusterSetUp() throws IOException {
+ conf = new HdfsConfiguration();
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL).build();
+ cluster.waitActive();
+ fs = cluster.getFileSystem();
+ }
+
+ @After
+ public void clusterShutdown() throws IOException{
+ if(fs != null) {
+ fs.close();
+ fs = null;
+ }
+ if(cluster != null) {
+ cluster.shutdown();
+ cluster = null;
+ }
+ }
+
+ @Test
+ public void testStoragePolicyByDefault() throws Exception {
+ final Path foo = new Path("/foo");
+ final Path bar = new Path(foo, "bar");
+ final Path wow = new Path(bar, "wow");
+ final Path fooz = new Path(bar, "/fooz");
+ DFSTestUtil.createFile(fs, wow, SIZE, REPL, 0);
+
+ final BlockStoragePolicySuite suite = BlockStoragePolicySuite
+ .createDefaultSuite();
+ final BlockStoragePolicy hot = suite.getPolicy("HOT");
+
+ /*
+ * test: storage policy is HOT by default or inherited from nearest
+ * ancestor, if not explicitly specified for newly created dir/file.
+ */
+ assertEquals(fs.getStoragePolicy(foo), hot);
+ assertEquals(fs.getStoragePolicy(bar), hot);
+ assertEquals(fs.getStoragePolicy(wow), hot);
+ try {
+ fs.getStoragePolicy(fooz);
+ } catch (Exception e) {
+ assertTrue(e instanceof FileNotFoundException);
+ }
+ }
+
+ @Test
+ public void testSetAndUnsetStoragePolicy() throws Exception {
+ final Path foo = new Path("/foo");
+ final Path bar = new Path(foo, "bar");
+ final Path wow = new Path(bar, "wow");
+ final Path fooz = new Path(bar, "/fooz");
+ DFSTestUtil.createFile(fs, wow, SIZE, REPL, 0);
+
+ final BlockStoragePolicySuite suite = BlockStoragePolicySuite
+ .createDefaultSuite();
+ final BlockStoragePolicy warm = suite.getPolicy("WARM");
+ final BlockStoragePolicy cold = suite.getPolicy("COLD");
+ final BlockStoragePolicy hot = suite.getPolicy("HOT");
+
+ /*
+ * test: set storage policy
+ */
+ fs.setStoragePolicy(foo, warm.getName());
+ fs.setStoragePolicy(bar, cold.getName());
+ fs.setStoragePolicy(wow, hot.getName());
+ try {
+ fs.setStoragePolicy(fooz, warm.getName());
+ } catch (Exception e) {
+ assertTrue(e instanceof FileNotFoundException);
+ }
+
+ /*
+ * test: get storage policy after set
+ */
+ assertEquals(fs.getStoragePolicy(foo), warm);
+ assertEquals(fs.getStoragePolicy(bar), cold);
+ assertEquals(fs.getStoragePolicy(wow), hot);
+ try {
+ fs.getStoragePolicy(fooz);
+ } catch (Exception e) {
+ assertTrue(e instanceof FileNotFoundException);
+ }
+
+ /*
+ * test: unset storage policy
+ */
+ fs.unsetStoragePolicy(foo);
+ fs.unsetStoragePolicy(bar);
+ fs.unsetStoragePolicy(wow);
+ try {
+ fs.unsetStoragePolicy(fooz);
+ } catch (Exception e) {
+ assertTrue(e instanceof FileNotFoundException);
+ }
+
+ /*
+ * test: get storage policy after unset
+ */
+ assertEquals(fs.getStoragePolicy(foo), hot);
+ assertEquals(fs.getStoragePolicy(bar), hot);
+ assertEquals(fs.getStoragePolicy(wow), hot);
+ try {
+ fs.getStoragePolicy(fooz);
+ } catch (Exception e) {
+ assertTrue(e instanceof FileNotFoundException);
+ }
+ }
+
+ @Test
+ public void testNestedStoragePolicy() throws Exception {
+ final Path foo = new Path("/foo");
+ final Path bar = new Path(foo, "bar");
+ final Path wow = new Path(bar, "wow");
+ final Path fooz = new Path("/foos");
+ DFSTestUtil.createFile(fs, wow, SIZE, REPL, 0);
+
+ final BlockStoragePolicySuite suite = BlockStoragePolicySuite
+ .createDefaultSuite();
+ final BlockStoragePolicy warm = suite.getPolicy("WARM");
+ final BlockStoragePolicy cold = suite.getPolicy("COLD");
+ final BlockStoragePolicy hot = suite.getPolicy("HOT");
+
+ /*
+ * test: set storage policy
+ */
+ fs.setStoragePolicy(foo, warm.getName());
+ fs.setStoragePolicy(bar, cold.getName());
+ fs.setStoragePolicy(wow, hot.getName());
+ try {
+ fs.setStoragePolicy(fooz, warm.getName());
+ } catch (Exception e) {
+ assertTrue(e instanceof FileNotFoundException);
+ }
+
+ /*
+ * test: get storage policy after set
+ */
+ assertEquals(fs.getStoragePolicy(foo), warm);
+ assertEquals(fs.getStoragePolicy(bar), cold);
+ assertEquals(fs.getStoragePolicy(wow), hot);
+ try {
+ fs.getStoragePolicy(fooz);
+ } catch (Exception e) {
+ assertTrue(e instanceof FileNotFoundException);
+ }
+
+ /*
+ * test: unset storage policy in the case of being nested
+ */
+ // unset wow
+ fs.unsetStoragePolicy(wow);
+ // inherit storage policy from wow's nearest ancestor
+ assertEquals(fs.getStoragePolicy(wow), cold);
+ // unset bar
+ fs.unsetStoragePolicy(bar);
+ // inherit storage policy from bar's nearest ancestor
+ assertEquals(fs.getStoragePolicy(bar), warm);
+ // unset foo
+ fs.unsetStoragePolicy(foo);
+ // default storage policy is applied, since no more available ancestors
+ assertEquals(fs.getStoragePolicy(foo), hot);
+ // unset fooz
+ try {
+ fs.unsetStoragePolicy(fooz);
+ } catch (Exception e) {
+ assertTrue(e instanceof FileNotFoundException);
+ }
+
+ /*
+ * test: default storage policy is applied, since no explicit policies from
+ * ancestors are available
+ */
+ assertEquals(fs.getStoragePolicy(foo), hot);
+ assertEquals(fs.getStoragePolicy(bar), hot);
+ assertEquals(fs.getStoragePolicy(wow), hot);
+ try {
+ fs.getStoragePolicy(fooz);
+ } catch (Exception e) {
+ assertTrue(e instanceof FileNotFoundException);
+ }
+ }
+
+ @Test
+ public void testSetAndGetStoragePolicy() throws IOException {
+ final Path foo = new Path("/foo");
+ final Path bar = new Path(foo, "bar");
+ final Path fooz = new Path("/fooz");
+ DFSTestUtil.createFile(fs, bar, SIZE, REPL, 0);
+
+ final BlockStoragePolicySuite suite = BlockStoragePolicySuite
+ .createDefaultSuite();
+ final BlockStoragePolicy warm = suite.getPolicy("WARM");
+ final BlockStoragePolicy cold = suite.getPolicy("COLD");
+ final BlockStoragePolicy hot = suite.getPolicy("HOT");
+
+ assertEquals(fs.getStoragePolicy(foo), hot);
+ assertEquals(fs.getStoragePolicy(bar), hot);
+ try {
+ fs.getStoragePolicy(fooz);
+ } catch (Exception e) {
+ assertTrue(e instanceof FileNotFoundException);
+ }
+
+ /*
+ * test: set storage policy
+ */
+ fs.setStoragePolicy(foo, warm.getName());
+ fs.setStoragePolicy(bar, cold.getName());
+ try {
+ fs.setStoragePolicy(fooz, warm.getName());
+ } catch (Exception e) {
+ assertTrue(e instanceof FileNotFoundException);
+ }
+
+ /*
+ * test: get storage policy after set
+ */
+ assertEquals(fs.getStoragePolicy(foo), warm);
+ assertEquals(fs.getStoragePolicy(bar), cold);
+ try {
+ fs.getStoragePolicy(fooz);
+ } catch (Exception e) {
+ assertTrue(e instanceof FileNotFoundException);
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/27941a18/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
index ec0bb66..63f8484 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
@@ -62,6 +62,69 @@ public class TestStoragePolicyCommands {
}
}
+
+ @Test
+ public void testSetAndUnsetStoragePolicy() throws Exception {
+ final Path foo = new Path("/foo");
+ final Path bar = new Path(foo, "bar");
+ final Path wow = new Path(bar, "wow");
+ DFSTestUtil.createFile(fs, wow, SIZE, REPL, 0);
+
+ /*
+ * test: set storage policy
+ */
+ final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
+ DFSTestUtil.toolRun(admin, "-setStoragePolicy -path /foo -policy WARM", 0,
+ "Set storage policy WARM on " + foo.toString());
+ DFSTestUtil.toolRun(admin, "-setStoragePolicy -path /foo/bar -policy COLD",
+ 0, "Set storage policy COLD on " + bar.toString());
+ DFSTestUtil.toolRun(admin, "-setStoragePolicy -path /foo/bar/wow -policy HOT",
+ 0, "Set storage policy HOT on " + wow.toString());
+ DFSTestUtil.toolRun(admin, "-setStoragePolicy -path /fooz -policy WARM",
+ 2, "File/Directory does not exist: /fooz");
+
+ /*
+ * test: get storage policy after set
+ */
+ final BlockStoragePolicySuite suite = BlockStoragePolicySuite
+ .createDefaultSuite();
+ final BlockStoragePolicy warm = suite.getPolicy("WARM");
+ final BlockStoragePolicy cold = suite.getPolicy("COLD");
+ final BlockStoragePolicy hot = suite.getPolicy("HOT");
+ DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /foo", 0,
+ "The storage policy of " + foo.toString() + ":\n" + warm);
+ DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /foo/bar", 0,
+ "The storage policy of " + bar.toString() + ":\n" + cold);
+ DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /foo/bar/wow", 0,
+ "The storage policy of " + wow.toString() + ":\n" + hot);
+ DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /fooz", 2,
+ "File/Directory does not exist: /fooz");
+
+ /*
+ * test: unset storage policy
+ */
+ DFSTestUtil.toolRun(admin, "-unsetStoragePolicy -path /foo", 0,
+ "Unset storage policy from " + foo.toString());
+ DFSTestUtil.toolRun(admin, "-unsetStoragePolicy -path /foo/bar", 0,
+ "Unset storage policy from " + bar.toString());
+ DFSTestUtil.toolRun(admin, "-unsetStoragePolicy -path /foo/bar/wow", 0,
+ "Unset storage policy from " + wow.toString());
+ DFSTestUtil.toolRun(admin, "-unsetStoragePolicy -path /fooz", 2,
+ "File/Directory does not exist: /fooz");
+
+ /*
+ * test: get storage policy after unset
+ */
+ DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /foo", 0,
+ "The storage policy of " + foo.toString() + " is unspecified");
+ DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /foo/bar", 0,
+ "The storage policy of " + bar.toString() + " is unspecified");
+ DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /foo/bar/wow", 0,
+ "The storage policy of " + wow.toString() + " is unspecified");
+ DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /fooz", 2,
+ "File/Directory does not exist: /fooz");
+ }
+
@Test
public void testSetAndGetStoragePolicy() throws Exception {
final Path foo = new Path("/foo");