You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by um...@apache.org on 2014/07/23 21:05:12 UTC
svn commit: r1612922 - in
/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./
src/main/java/org/apache/hadoop/hdfs/protocol/
src/main/java/org/apache/hadoop/hdfs/server/namenode/
src/main/java/org/apache/hadoop/hdfs/web/resources/ src/test/java/or...
Author: umamahesh
Date: Wed Jul 23 19:05:11 2014
New Revision: 1612922
URL: http://svn.apache.org/r1612922
Log:
HDFS-6422. getfattr in CLI doesn't throw exception or return non-0 return code when xattr doesn't exist. (Charles Lamb via umamahesh)
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1612922&r1=1612921&r2=1612922&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Jul 23 19:05:11 2014
@@ -892,6 +892,9 @@ Release 2.5.0 - UNRELEASED
HDFS-6703. NFS: Files can be deleted from a read-only mount
(Srikanth Upputuri via brandonli)
+ HDFS-6422. getfattr in CLI doesn't throw exception or return non-0 return code
+ when xattr doesn't exist. (Charles Lamb via umamahesh)
+
BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS
HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=1612922&r1=1612921&r2=1612922&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Wed Jul 23 19:05:11 2014
@@ -1337,6 +1337,6 @@ public interface ClientProtocol {
* @param xAttr <code>XAttr</code> to remove
* @throws IOException
*/
- @Idempotent
+ @AtMostOnce
public void removeXAttr(String src, XAttr xAttr) throws IOException;
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1612922&r1=1612921&r2=1612922&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Wed Jul 23 19:05:11 2014
@@ -1074,10 +1074,11 @@ public class FSEditLog implements LogsPu
logEdit(op);
}
- void logRemoveXAttrs(String src, List<XAttr> xAttrs) {
+ void logRemoveXAttrs(String src, List<XAttr> xAttrs, boolean toLogRpcIds) {
final RemoveXAttrOp op = RemoveXAttrOp.getInstance();
op.src = src;
op.xAttrs = xAttrs;
+ logRpcIds(op, toLogRpcIds);
logEdit(op);
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1612922&r1=1612921&r2=1612922&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Wed Jul 23 19:05:11 2014
@@ -821,6 +821,10 @@ public class FSEditLogLoader {
RemoveXAttrOp removeXAttrOp = (RemoveXAttrOp) op;
fsDir.unprotectedRemoveXAttrs(removeXAttrOp.src,
removeXAttrOp.xAttrs);
+ if (toAddRetryCache) {
+ fsNamesys.addCacheEntry(removeXAttrOp.rpcClientId,
+ removeXAttrOp.rpcCallId);
+ }
break;
}
default:
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java?rev=1612922&r1=1612921&r2=1612922&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java Wed Jul 23 19:05:11 2014
@@ -3551,6 +3551,7 @@ public abstract class FSEditLogOp {
XAttrEditLogProto p = XAttrEditLogProto.parseDelimitedFrom(in);
src = p.getSrc();
xAttrs = PBHelper.convertXAttrs(p.getXAttrsList());
+ readRpcIds(in, logVersion);
}
@Override
@@ -3561,18 +3562,22 @@ public abstract class FSEditLogOp {
}
b.addAllXAttrs(PBHelper.convertXAttrProto(xAttrs));
b.build().writeDelimitedTo(out);
+ // clientId and callId
+ writeRpcIds(rpcClientId, rpcCallId, out);
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "SRC", src);
appendXAttrsToXml(contentHandler, xAttrs);
+ appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
}
@Override
void fromXml(Stanza st) throws InvalidXmlException {
src = st.getValue("SRC");
xAttrs = readXAttrsFromXml(st);
+ readRpcIdsFromXml(st);
}
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1612922&r1=1612921&r2=1612922&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Wed Jul 23 19:05:11 2014
@@ -8279,11 +8279,12 @@ public class FSNamesystem implements Nam
nnConf.checkXAttrsConfigFlag();
FSPermissionChecker pc = getPermissionChecker();
boolean getAll = xAttrs == null || xAttrs.isEmpty();
- List<XAttr> filteredXAttrs = null;
if (!getAll) {
- filteredXAttrs = XAttrPermissionFilter.filterXAttrsForApi(pc, xAttrs);
- if (filteredXAttrs.isEmpty()) {
- return filteredXAttrs;
+ try {
+ XAttrPermissionFilter.checkPermissionForApi(pc, xAttrs);
+ } catch (AccessControlException e) {
+ logAuditEvent(false, "getXAttrs", src);
+ throw e;
}
}
checkOperation(OperationCategory.READ);
@@ -8302,15 +8303,21 @@ public class FSNamesystem implements Nam
if (filteredAll == null || filteredAll.isEmpty()) {
return null;
}
- List<XAttr> toGet = Lists.newArrayListWithCapacity(filteredXAttrs.size());
- for (XAttr xAttr : filteredXAttrs) {
+ List<XAttr> toGet = Lists.newArrayListWithCapacity(xAttrs.size());
+ for (XAttr xAttr : xAttrs) {
+ boolean foundIt = false;
for (XAttr a : filteredAll) {
if (xAttr.getNameSpace() == a.getNameSpace()
&& xAttr.getName().equals(a.getName())) {
toGet.add(a);
+ foundIt = true;
break;
}
}
+ if (!foundIt) {
+ throw new IOException(
+ "At least one of the attributes provided was not found.");
+ }
}
return toGet;
}
@@ -8344,17 +8351,42 @@ public class FSNamesystem implements Nam
readUnlock();
}
}
-
+
+ /**
+ * Remove an xattr for a file or directory.
+ *
+ * @param src
+ * - path to remove the xattr from
+ * @param xAttr
+ * - xAttr to remove
+ * @throws AccessControlException
+ * @throws SafeModeException
+ * @throws UnresolvedLinkException
+ * @throws IOException
+ */
void removeXAttr(String src, XAttr xAttr) throws IOException {
- nnConf.checkXAttrsConfigFlag();
- HdfsFileStatus resultingStat = null;
- FSPermissionChecker pc = getPermissionChecker();
+ CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
+ if (cacheEntry != null && cacheEntry.isSuccess()) {
+ return; // Return previous response
+ }
+ boolean success = false;
try {
- XAttrPermissionFilter.checkPermissionForApi(pc, xAttr);
+ removeXAttrInt(src, xAttr, cacheEntry != null);
+ success = true;
} catch (AccessControlException e) {
logAuditEvent(false, "removeXAttr", src);
throw e;
+ } finally {
+ RetryCache.setState(cacheEntry, success);
}
+ }
+
+ void removeXAttrInt(String src, XAttr xAttr, boolean logRetryCache)
+ throws IOException {
+ nnConf.checkXAttrsConfigFlag();
+ HdfsFileStatus resultingStat = null;
+ FSPermissionChecker pc = getPermissionChecker();
+ XAttrPermissionFilter.checkPermissionForApi(pc, xAttr);
checkOperation(OperationCategory.WRITE);
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
writeLock();
@@ -8368,12 +8400,12 @@ public class FSNamesystem implements Nam
xAttrs.add(xAttr);
List<XAttr> removedXAttrs = dir.removeXAttrs(src, xAttrs);
if (removedXAttrs != null && !removedXAttrs.isEmpty()) {
- getEditLog().logRemoveXAttrs(src, removedXAttrs);
+ getEditLog().logRemoveXAttrs(src, removedXAttrs, logRetryCache);
+ } else {
+ throw new IOException(
+ "No matching attributes found for remove operation");
}
resultingStat = getAuditFileInfo(src, false);
- } catch (AccessControlException e) {
- logAuditEvent(false, "removeXAttr", src);
- throw e;
} finally {
writeUnlock();
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java?rev=1612922&r1=1612921&r2=1612922&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java Wed Jul 23 19:05:11 2014
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.XAttrHelpe
import org.apache.hadoop.security.AccessControlException;
import com.google.common.collect.Lists;
+import com.google.common.base.Preconditions;
/**
* There are four types of extended attributes <XAttr> defined by the
@@ -60,8 +61,20 @@ public class XAttrPermissionFilter {
throw new AccessControlException("User doesn't have permission for xattr: "
+ XAttrHelper.getPrefixName(xAttr));
}
-
- static List<XAttr> filterXAttrsForApi(FSPermissionChecker pc,
+
+ static void checkPermissionForApi(FSPermissionChecker pc,
+ List<XAttr> xAttrs) throws AccessControlException {
+ Preconditions.checkArgument(xAttrs != null);
+ if (xAttrs.isEmpty()) {
+ return;
+ }
+
+ for (XAttr xAttr : xAttrs) {
+ checkPermissionForApi(pc, xAttr);
+ }
+ }
+
+ static List<XAttr> filterXAttrsForApi(FSPermissionChecker pc,
List<XAttr> xAttrs) {
assert xAttrs != null : "xAttrs can not be null";
if (xAttrs == null || xAttrs.isEmpty()) {
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java?rev=1612922&r1=1612921&r2=1612922&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java Wed Jul 23 19:05:11 2014
@@ -25,8 +25,8 @@ public class XAttrNameParam extends Stri
/** Default parameter value. **/
public static final String DEFAULT = "";
- private static Domain DOMAIN = new Domain(NAME,
- Pattern.compile("^(user\\.|trusted\\.|system\\.|security\\.).+"));
+ private static Domain DOMAIN = new Domain(NAME,
+ Pattern.compile(".*"));
public XAttrNameParam(final String str) {
super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str);
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java?rev=1612922&r1=1612921&r2=1612922&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java Wed Jul 23 19:05:11 2014
@@ -2653,6 +2653,75 @@ public class TestDFSShell {
}
}
+ /*
+ * 1. Test that CLI throws an exception and returns non-0 when user does
+ * not have permission to read an xattr.
+ * 2. Test that CLI throws an exception and returns non-0 when a non-existent
+ * xattr is requested.
+ */
+ @Test (timeout = 120000)
+ public void testGetFAttrErrors() throws Exception {
+ final UserGroupInformation user = UserGroupInformation.
+ createUserForTesting("user", new String[] {"mygroup"});
+ MiniDFSCluster cluster = null;
+ PrintStream bakErr = null;
+ try {
+ final Configuration conf = new HdfsConfiguration();
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+ cluster.waitActive();
+
+ final FileSystem fs = cluster.getFileSystem();
+ final Path p = new Path("/foo");
+ fs.mkdirs(p);
+ bakErr = System.err;
+
+ final FsShell fshell = new FsShell(conf);
+ final ByteArrayOutputStream out = new ByteArrayOutputStream();
+ System.setErr(new PrintStream(out));
+
+ // No permission for "other".
+ fs.setPermission(p, new FsPermission((short) 0700));
+
+ {
+ final int ret = ToolRunner.run(fshell, new String[] {
+ "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
+ assertEquals("Returned should be 0", 0, ret);
+ out.reset();
+ }
+
+ user.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ int ret = ToolRunner.run(fshell, new String[] {
+ "-getfattr", "-n", "user.a1", "/foo"});
+ String str = out.toString();
+ assertTrue("xattr value was incorrectly returned",
+ str.indexOf("1234") == -1);
+ out.reset();
+ return null;
+ }
+ });
+
+ {
+ final int ret = ToolRunner.run(fshell, new String[]{
+ "-getfattr", "-n", "user.nonexistent", "/foo"});
+ String str = out.toString();
+ assertTrue("xattr value was incorrectly returned",
+ str.indexOf(
+ "getfattr: At least one of the attributes provided was not found")
+ >= 0);
+ out.reset();
+ }
+ } finally {
+ if (bakErr != null) {
+ System.setErr(bakErr);
+ }
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
+
/**
* Test that the server trash configuration is respected when
* the client configuration is not set.
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java?rev=1612922&r1=1612921&r2=1612922&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java Wed Jul 23 19:05:11 2014
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.na
import java.io.FileNotFoundException;
import java.io.IOException;
-import java.io.FileNotFoundException;
import java.security.PrivilegedExceptionAction;
import java.util.EnumSet;
import java.util.List;
@@ -46,6 +45,7 @@ import static org.apache.hadoop.fs.permi
import static org.apache.hadoop.fs.permission.FsAction.ALL;
import static org.apache.hadoop.fs.permission.FsAction.READ;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import org.junit.After;
@@ -261,11 +261,12 @@ public class FSXAttrBaseTest {
fs.setXAttr(path, "user.", value1, EnumSet.of(XAttrSetFlag.CREATE,
XAttrSetFlag.REPLACE));
Assert.fail("Setting xattr with empty name should fail.");
+ } catch (RemoteException e) {
+ assertEquals("Unexpected RemoteException: " + e, e.getClassName(),
+ HadoopIllegalArgumentException.class.getCanonicalName());
+ GenericTestUtils.assertExceptionContains("XAttr name cannot be empty", e);
} catch (HadoopIllegalArgumentException e) {
GenericTestUtils.assertExceptionContains("XAttr name cannot be empty", e);
- } catch (IllegalArgumentException e) {
- GenericTestUtils.assertExceptionContains("Invalid value: \"user.\" does " +
- "not belong to the domain ^(user\\.|trusted\\.|system\\.|security\\.).+", e);
}
// Set xattr with invalid name: "a1"
@@ -274,11 +275,12 @@ public class FSXAttrBaseTest {
XAttrSetFlag.REPLACE));
Assert.fail("Setting xattr with invalid name prefix or without " +
"name prefix should fail.");
+ } catch (RemoteException e) {
+ assertEquals("Unexpected RemoteException: " + e, e.getClassName(),
+ HadoopIllegalArgumentException.class.getCanonicalName());
+ GenericTestUtils.assertExceptionContains("XAttr name must be prefixed", e);
} catch (HadoopIllegalArgumentException e) {
GenericTestUtils.assertExceptionContains("XAttr name must be prefixed", e);
- } catch (IllegalArgumentException e) {
- GenericTestUtils.assertExceptionContains("Invalid value: \"a1\" does " +
- "not belong to the domain ^(user\\.|trusted\\.|system\\.|security\\.).+", e);
}
// Set xattr without XAttrSetFlag
@@ -341,9 +343,18 @@ public class FSXAttrBaseTest {
}
/**
- * Tests for getting xattr
- * 1. To get xattr which does not exist.
- * 2. To get multiple xattrs.
+ * getxattr tests. Test that getxattr throws an exception if any of
+ * the following are true:
+ * an xattr that was requested doesn't exist
+ * the caller specifies an unknown namespace
+ * the caller doesn't have access to the namespace
+ * the caller doesn't have permission to get the value of the xattr
+ * the caller does not have search access to the parent directory
+ * the caller has only read access to the owning directory
+ * the caller has only search access to the owning directory and
+ * execute/search access to the actual entity
+ * the caller does not have search access to the owning directory and read
+ * access to the actual entity
*/
@Test(timeout = 120000)
public void testGetXAttrs() throws Exception {
@@ -351,21 +362,159 @@ public class FSXAttrBaseTest {
fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
- // XAttr does not exist.
- byte[] value = fs.getXAttr(path, name3);
- Assert.assertEquals(value, null);
-
- List<String> names = Lists.newArrayList();
- names.add(name1);
- names.add(name2);
- names.add(name3);
- Map<String, byte[]> xattrs = fs.getXAttrs(path, names);
- Assert.assertEquals(xattrs.size(), 2);
- Assert.assertArrayEquals(value1, xattrs.get(name1));
- Assert.assertArrayEquals(value2, xattrs.get(name2));
+ /* An XAttr that was requested does not exist. */
+ try {
+ final byte[] value = fs.getXAttr(path, name3);
+ Assert.fail("expected IOException");
+ } catch (IOException e) {
+ GenericTestUtils.assertExceptionContains(
+ "At least one of the attributes provided was not found.", e);
+ }
+
+ /* Throw an exception if an xattr that was requested does not exist. */
+ {
+ final List<String> names = Lists.newArrayList();
+ names.add(name1);
+ names.add(name2);
+ names.add(name3);
+ try {
+ final Map<String, byte[]> xattrs = fs.getXAttrs(path, names);
+ Assert.fail("expected IOException");
+ } catch (IOException e) {
+ GenericTestUtils.assertExceptionContains(
+ "At least one of the attributes provided was not found.", e);
+ }
+ }
fs.removeXAttr(path, name1);
fs.removeXAttr(path, name2);
+
+ /* Unknown namespace should throw an exception. */
+ try {
+ final byte[] xattr = fs.getXAttr(path, "wackynamespace.foo");
+ Assert.fail("expected IOException");
+ } catch (Exception e) {
+ GenericTestUtils.assertExceptionContains
+ ("An XAttr name must be prefixed with user/trusted/security/system, " +
+ "followed by a '.'",
+ e);
+ }
+
+ /*
+ * The 'trusted' namespace should not be accessible and should throw an
+ * exception.
+ */
+ final UserGroupInformation user = UserGroupInformation.
+ createUserForTesting("user", new String[] {"mygroup"});
+ fs.setXAttr(path, "trusted.foo", "1234".getBytes());
+ try {
+ user.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ final FileSystem userFs = dfsCluster.getFileSystem();
+ final byte[] xattr = userFs.getXAttr(path, "trusted.foo");
+ return null;
+ }
+ });
+ Assert.fail("expected IOException");
+ } catch (IOException e) {
+ GenericTestUtils.assertExceptionContains("User doesn't have permission", e);
+ }
+
+ fs.setXAttr(path, name1, "1234".getBytes());
+
+ /*
+ * Test that an exception is thrown if the caller doesn't have permission to
+ * get the value of the xattr.
+ */
+
+ /* Set access so that only the owner has access. */
+ fs.setPermission(path, new FsPermission((short) 0700));
+ try {
+ user.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ final FileSystem userFs = dfsCluster.getFileSystem();
+ final byte[] xattr = userFs.getXAttr(path, name1);
+ return null;
+ }
+ });
+ Assert.fail("expected IOException");
+ } catch (IOException e) {
+ GenericTestUtils.assertExceptionContains("Permission denied", e);
+ }
+
+ /*
+ * The caller must have search access to the parent directory.
+ */
+ final Path childDir = new Path(path, "child" + pathCount);
+ /* Set access to parent so that only the owner has access. */
+ FileSystem.mkdirs(fs, childDir, FsPermission.createImmutable((short)0700));
+ fs.setXAttr(childDir, name1, "1234".getBytes());
+ try {
+ user.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ final FileSystem userFs = dfsCluster.getFileSystem();
+ final byte[] xattr = userFs.getXAttr(childDir, name1);
+ return null;
+ }
+ });
+ Assert.fail("expected IOException");
+ } catch (IOException e) {
+ GenericTestUtils.assertExceptionContains("Permission denied", e);
+ }
+
+ /* Check that read access to the owning directory is not good enough. */
+ fs.setPermission(path, new FsPermission((short) 0704));
+ try {
+ user.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ final FileSystem userFs = dfsCluster.getFileSystem();
+ final byte[] xattr = userFs.getXAttr(childDir, name1);
+ return null;
+ }
+ });
+ Assert.fail("expected IOException");
+ } catch (IOException e) {
+ GenericTestUtils.assertExceptionContains("Permission denied", e);
+ }
+
+ /*
+ * Check that search access to the owning directory and search/execute
+ * access to the actual entity with extended attributes is not good enough.
+ */
+ fs.setPermission(path, new FsPermission((short) 0701));
+ fs.setPermission(childDir, new FsPermission((short) 0701));
+ try {
+ user.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ final FileSystem userFs = dfsCluster.getFileSystem();
+ final byte[] xattr = userFs.getXAttr(childDir, name1);
+ return null;
+ }
+ });
+ Assert.fail("expected IOException");
+ } catch (IOException e) {
+ GenericTestUtils.assertExceptionContains("Permission denied", e);
+ }
+
+ /*
+ * Check that search access to the owning directory and read access to
+ * the actual entity with the extended attribute is good enough.
+ */
+ fs.setPermission(path, new FsPermission((short) 0701));
+ fs.setPermission(childDir, new FsPermission((short) 0704));
+ user.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ final FileSystem userFs = dfsCluster.getFileSystem();
+ final byte[] xattr = userFs.getXAttr(childDir, name1);
+ return null;
+ }
+ });
}
/**
@@ -402,6 +551,166 @@ public class FSXAttrBaseTest {
fs.removeXAttr(path, name3);
}
+ /**
+ * removexattr tests. Test that removexattr throws an exception if any of
+ * the following are true:
+ * an xattr that was requested doesn't exist
+ * the caller specifies an unknown namespace
+ * the caller doesn't have access to the namespace
+ * the caller doesn't have permission to get the value of the xattr
+ * the caller does not have "execute" (scan) access to the parent directory
+ * the caller has only read access to the owning directory
+ * the caller has only execute access to the owning directory and execute
+ * access to the actual entity
+ * the caller does not have execute access to the owning directory and write
+ * access to the actual entity
+ */
+ @Test(timeout = 120000)
+ public void testRemoveXAttrPermissions() throws Exception {
+ FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
+ fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
+ fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
+ fs.setXAttr(path, name3, null, EnumSet.of(XAttrSetFlag.CREATE));
+
+ try {
+ fs.removeXAttr(path, name2);
+ fs.removeXAttr(path, name2);
+ Assert.fail("expected IOException");
+ } catch (IOException e) {
+ GenericTestUtils.assertExceptionContains("No matching attributes found", e);
+ }
+
+ /* Unknown namespace should throw an exception. */
+ final String expectedExceptionString = "An XAttr name must be prefixed " +
+ "with user/trusted/security/system, followed by a '.'";
+ try {
+ fs.removeXAttr(path, "wackynamespace.foo");
+ Assert.fail("expected IOException");
+ } catch (RemoteException e) {
+ assertEquals("Unexpected RemoteException: " + e, e.getClassName(),
+ HadoopIllegalArgumentException.class.getCanonicalName());
+ GenericTestUtils.assertExceptionContains(expectedExceptionString, e);
+ } catch (HadoopIllegalArgumentException e) {
+ GenericTestUtils.assertExceptionContains(expectedExceptionString, e);
+ }
+
+ /*
+ * The 'trusted' namespace should not be accessible and should throw an
+ * exception.
+ */
+ final UserGroupInformation user = UserGroupInformation.
+ createUserForTesting("user", new String[] {"mygroup"});
+ fs.setXAttr(path, "trusted.foo", "1234".getBytes());
+ try {
+ user.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ final FileSystem userFs = dfsCluster.getFileSystem();
+ userFs.removeXAttr(path, "trusted.foo");
+ return null;
+ }
+ });
+ Assert.fail("expected IOException");
+ } catch (IOException e) {
+ GenericTestUtils.assertExceptionContains("User doesn't have permission", e);
+ } finally {
+ fs.removeXAttr(path, "trusted.foo");
+ }
+
+ /*
+ * Test that an exception is thrown if the caller doesn't have permission to
+ * get the value of the xattr.
+ */
+
+ /* Set access so that only the owner has access. */
+ fs.setPermission(path, new FsPermission((short) 0700));
+ try {
+ user.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ final FileSystem userFs = dfsCluster.getFileSystem();
+ userFs.removeXAttr(path, name1);
+ return null;
+ }
+ });
+ Assert.fail("expected IOException");
+ } catch (IOException e) {
+ GenericTestUtils.assertExceptionContains("Permission denied", e);
+ }
+
+ /*
+ * The caller must have "execute" (scan) access to the parent directory.
+ */
+ final Path childDir = new Path(path, "child" + pathCount);
+ /* Set access to parent so that only the owner has access. */
+ FileSystem.mkdirs(fs, childDir, FsPermission.createImmutable((short)0700));
+ fs.setXAttr(childDir, name1, "1234".getBytes());
+ try {
+ user.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ final FileSystem userFs = dfsCluster.getFileSystem();
+ userFs.removeXAttr(childDir, name1);
+ return null;
+ }
+ });
+ Assert.fail("expected IOException");
+ } catch (IOException e) {
+ GenericTestUtils.assertExceptionContains("Permission denied", e);
+ }
+
+ /* Check that read access to the owning directory is not good enough. */
+ fs.setPermission(path, new FsPermission((short) 0704));
+ try {
+ user.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ final FileSystem userFs = dfsCluster.getFileSystem();
+ userFs.removeXAttr(childDir, name1);
+ return null;
+ }
+ });
+ Assert.fail("expected IOException");
+ } catch (IOException e) {
+ GenericTestUtils.assertExceptionContains("Permission denied", e);
+ }
+
+ /*
+ * Check that execute access to the owning directory and scan access to
+ * the actual entity with extended attributes is not good enough.
+ */
+ fs.setPermission(path, new FsPermission((short) 0701));
+ fs.setPermission(childDir, new FsPermission((short) 0701));
+ try {
+ user.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ final FileSystem userFs = dfsCluster.getFileSystem();
+ userFs.removeXAttr(childDir, name1);
+ return null;
+ }
+ });
+ Assert.fail("expected IOException");
+ } catch (IOException e) {
+ GenericTestUtils.assertExceptionContains("Permission denied", e);
+ }
+
+ /*
+ * Check that execute access to the owning directory and write access to
+ * the actual entity with extended attributes is good enough.
+ */
+ fs.setPermission(path, new FsPermission((short) 0701));
+ fs.setPermission(childDir, new FsPermission((short) 0706));
+ user.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ final FileSystem userFs = dfsCluster.getFileSystem();
+ userFs.removeXAttr(childDir, name1);
+ return null;
+ }
+ });
+ }
+
@Test(timeout = 120000)
public void testRenameFileWithXAttr() throws Exception {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java?rev=1612922&r1=1612921&r2=1612922&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java Wed Jul 23 19:05:11 2014
@@ -415,7 +415,7 @@ public class TestNamenodeRetryCache {
LightWeightCache<CacheEntry, CacheEntry> cacheSet =
(LightWeightCache<CacheEntry, CacheEntry>) namesystem.getRetryCache().getCacheSet();
- assertEquals(22, cacheSet.size());
+ assertEquals(23, cacheSet.size());
Map<CacheEntry, CacheEntry> oldEntries =
new HashMap<CacheEntry, CacheEntry>();
@@ -434,7 +434,7 @@ public class TestNamenodeRetryCache {
assertTrue(namesystem.hasRetryCache());
cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) namesystem
.getRetryCache().getCacheSet();
- assertEquals(22, cacheSet.size());
+ assertEquals(23, cacheSet.size());
iter = cacheSet.iterator();
while (iter.hasNext()) {
CacheEntry entry = iter.next();
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java?rev=1612922&r1=1612921&r2=1612922&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java Wed Jul 23 19:05:11 2014
@@ -160,7 +160,7 @@ public class TestRetryCacheWithHA {
FSNamesystem fsn0 = cluster.getNamesystem(0);
LightWeightCache<CacheEntry, CacheEntry> cacheSet =
(LightWeightCache<CacheEntry, CacheEntry>) fsn0.getRetryCache().getCacheSet();
- assertEquals(22, cacheSet.size());
+ assertEquals(23, cacheSet.size());
Map<CacheEntry, CacheEntry> oldEntries =
new HashMap<CacheEntry, CacheEntry>();
@@ -181,7 +181,7 @@ public class TestRetryCacheWithHA {
FSNamesystem fsn1 = cluster.getNamesystem(1);
cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) fsn1
.getRetryCache().getCacheSet();
- assertEquals(22, cacheSet.size());
+ assertEquals(23, cacheSet.size());
iter = cacheSet.iterator();
while (iter.hasNext()) {
CacheEntry entry = iter.next();
@@ -1047,6 +1047,49 @@ public class TestRetryCacheWithHA {
}
}
+ /**
+ * removeXAttr: at-most-once op verifying that a retried removeXAttr
+ * across an HA failover is served from the retry cache rather than
+ * re-executed (a second execution would fail, since the xattr is gone).
+ */
+ class RemoveXAttrOp extends AtMostOnceOp {
+ private final String src;
+
+ RemoveXAttrOp(DFSClient client, String src) {
+ super("removeXAttr", client);
+ this.src = src;
+ }
+
+ /**
+ * Ensure the target file exists and carries the xattr that invoke()
+ * will remove. The xattr is (re)set unconditionally: if the file was
+ * left over from earlier state without the xattr, removeXAttr would
+ * otherwise fail, because removeXAttr now throws when the xattr does
+ * not exist.
+ */
+ @Override
+ void prepare() throws Exception {
+ Path p = new Path(src);
+ if (!dfs.exists(p)) {
+ DFSTestUtil.createFile(dfs, p, BlockSize, DataNodes, 0);
+ }
+ // CREATE|REPLACE together means "set whether or not it already exists".
+ client.setXAttr(src, "user.key", "value".getBytes(),
+ EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
+ }
+
+ @Override
+ void invoke() throws Exception {
+ client.removeXAttr(src, "user.key");
+ }
+
+ /** Poll (up to CHECKTIMES seconds) until the xattr is gone from the NN. */
+ @Override
+ boolean checkNamenodeBeforeReturn() throws Exception {
+ for (int i = 0; i < CHECKTIMES; i++) {
+ Map<String, byte[]> xattrs = dfs.getXAttrs(new Path(src));
+ if (!xattrs.keySet().contains("user.key")) {
+ return true;
+ }
+ Thread.sleep(1000);
+ }
+ return false;
+ }
+
+ // removeXAttr returns void; there is no result to compare across retries.
+ @Override
+ Object getResult() {
+ return null;
+ }
+ }
+
@Test (timeout=60000)
public void testCreateSnapshot() throws Exception {
final DFSClient client = genClientWithDummyHandler();
@@ -1183,6 +1226,13 @@ public class TestRetryCacheWithHA {
testClientRetryWithFailover(op);
}
+ /** Exercise the removeXAttr retry-cache path under HA failover. */
+ @Test (timeout=60000)
+ public void testRemoveXAttr() throws Exception {
+ final AtMostOnceOp removeOp =
+ new RemoveXAttrOp(genClientWithDummyHandler(), "/removexattr");
+ testClientRetryWithFailover(removeOp);
+ }
+
/**
* When NN failover happens, if the client did not receive the response and
* send a retry request to the other NN, the same response should be received
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java?rev=1612922&r1=1612921&r2=1612922&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java Wed Jul 23 19:05:11 2014
@@ -355,12 +355,6 @@ public class TestParam {
public void testXAttrNameParam() {
final XAttrNameParam p = new XAttrNameParam("user.a1");
Assert.assertEquals(p.getXAttrName(), "user.a1");
- try {
- new XAttrNameParam("a1");
- Assert.fail();
- } catch (IllegalArgumentException e) {
- LOG.info("EXPECTED: " + e);
- }
}
@Test
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored?rev=1612922&r1=1612921&r2=1612922&view=diff
==============================================================================
Binary files - no diff available.
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml?rev=1612922&r1=1612921&r2=1612922&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml Wed Jul 23 19:05:11 2014
@@ -986,6 +986,8 @@
<NAMESPACE>USER</NAMESPACE>
<NAME>a2</NAME>
</XATTR>
+ <RPC_CLIENTID>e03f4a52-3d85-4e05-8942-286185e639bd</RPC_CLIENTID>
+ <RPC_CALLID>82</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>