You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by um...@apache.org on 2014/05/21 15:57:36 UTC
svn commit: r1596575 [1/2] - in
/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/
src/main/java/org/apache/hadoop/fs/ src/main/java/org/apache/hadoop/hdfs/
src/main/java/org/apache/hadoop/hdfs/protocol/ src/main/java/org/apache/ha...
Author: umamahesh
Date: Wed May 21 13:57:33 2014
New Revision: 1596575
URL: http://svn.apache.org/r1596575
Log:
Merge HDFS-2006 HDFS XAttrs branch to Trunk
Added:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java
- copied unchanged from r1596568, hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
- copied unchanged from r1596568, hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNConf.java
- copied unchanged from r1596568, hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNConf.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFeature.java
- copied unchanged from r1596568, hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFeature.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
- copied unchanged from r1596568, hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java
- copied unchanged from r1596568, hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrEncodingParam.java
- copied unchanged from r1596568, hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrEncodingParam.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java
- copied unchanged from r1596568, hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrSetFlagParam.java
- copied unchanged from r1596568, hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrSetFlagParam.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrValueParam.java
- copied unchanged from r1596568, hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrValueParam.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto
- copied unchanged from r1596568, hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm
- copied unchanged from r1596568, hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestXAttrCLI.java
- copied unchanged from r1596568, hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestXAttrCLI.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java
- copied unchanged from r1596568, hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
- copied unchanged from r1596568, hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithXAttr.java
- copied unchanged from r1596568, hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithXAttr.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileContextXAttr.java
- copied unchanged from r1596568, hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileContextXAttr.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeXAttr.java
- copied unchanged from r1596568, hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeXAttr.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrConfigFlag.java
- copied unchanged from r1596568, hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrConfigFlag.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestXAttrsWithHA.java
- copied unchanged from r1596568, hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestXAttrsWithHA.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java
- copied unchanged from r1596568, hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSXAttr.java
- copied unchanged from r1596568, hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSXAttr.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml
- copied unchanged from r1596568, hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml
Removed:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclConfigFlag.java
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/ (props changed)
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/pom.xml
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/ (props changed)
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/ (props changed)
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/ (props changed)
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/ (props changed)
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/ (props changed)
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/ (props changed)
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
Propchange: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs:r1588992-1596568
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1596575&r1=1596574&r2=1596575&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed May 21 13:57:33 2014
@@ -254,6 +254,69 @@ Trunk (Unreleased)
HDFS-5794. Fix the inconsistency of layout version number of
ADD_DATANODE_AND_STORAGE_UUIDS between trunk and branch-2. (jing9)
+ BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS
+
+ HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)
+
+ HDFS-6302. Implement XAttr as an INode feature. (Yi Liu via umamahesh)
+
+ HDFS-6309. Javadocs for Xattrs apis in DFSClient and other minor fixups. (Charles Lamb via umamahesh)
+
+ HDFS-6258. Namenode server-side storage for XAttrs. (Yi Liu via umamahesh)
+
+ HDFS-6303. HDFS implementation of FileContext API for XAttrs. (Yi Liu and Charles Lamb via umamahesh)
+
+ HDFS-6324. Shift XAttr helper code out for reuse. (Yi Liu via umamahesh)
+
+ HDFS-6301. NameNode: persist XAttrs in fsimage and record XAttrs modifications to edit log.
+ (Yi Liu via umamahesh)
+
+ HDFS-6298. XML based End-to-End test for getfattr and setfattr commands. (Yi Liu via umamahesh)
+
+ HDFS-6314. Test cases for XAttrs. (Yi Liu via umamahesh)
+
+ HDFS-6344. Maximum limit on the size of an xattr. (Yi Liu via umamahesh)
+
+ HDFS-6377. Unify xattr name and value limits into a single limit. (wang)
+
+ HDFS-6373. Remove support for extended attributes on symlinks. (Charles Lamb via wang)
+
+ HDFS-6283. Write end user documentation for xattrs. (wang)
+
+ HDFS-6412. Interface audience and stability annotations missing from
+ several new classes related to xattrs. (wang)
+
+ HDFS-6259. Support extended attributes via WebHDFS. (yliu)
+
+ HDFS-6346. Optimize OP_SET_XATTRS by persisting single Xattr entry per setXattr/removeXattr api call
+ (Yi Liu via umamahesh)
+
+ HDFS-6331. ClientProtocol#setXattr should not be annotated idempotent.
+ (umamahesh via wang)
+
+ HDFS-6335. TestOfflineEditsViewer for XAttr. (Yi Liu via umamahesh)
+
+ HDFS-6343. fix TestNamenodeRetryCache and TestRetryCacheWithHA failures. (umamahesh)
+
+ HDFS-6366. FsImage loading failed with RemoveXattr op (umamahesh)
+
+ HDFS-6357. SetXattr should persist rpcIDs for handling retrycache with Namenode restart and HA
+ (umamahesh)
+
+ HDFS-6372. Handle setXattr rpcIDs for OfflineEditsViewer. (umamahesh)
+
+ HDFS-6410. DFSClient unwraps AclException in xattr methods, but those
+ methods cannot throw AclException. (wang)
+
+ HDFS-6413. xattr names erroneously handled as case-insensitive.
+ (Charles Lamb via cnauroth)
+
+ HDFS-6414. xattr modification operations are based on state of latest
+ snapshot instead of current version of inode. (Andrew Wang via cnauroth)
+
+ HDFS-6374. setXAttr should require the user to be the owner of the file
+ or directory (Charles Lamb via wang)
+
Release 2.5.0 - UNRELEASED
INCOMPATIBLE CHANGES
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/pom.xml?rev=1596575&r1=1596574&r2=1596575&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/pom.xml (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/pom.xml Wed May 21 13:57:33 2014
@@ -290,6 +290,7 @@ http://maven.apache.org/xsd/maven-4.0.0.
<include>NamenodeProtocol.proto</include>
<include>QJournalProtocol.proto</include>
<include>acl.proto</include>
+ <include>xattr.proto</include>
<include>datatransfer.proto</include>
<include>fsimage.proto</include>
<include>hdfs.proto</include>
Propchange: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
Merged /hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1588992-1596568
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java?rev=1596575&r1=1596574&r2=1596575&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java Wed May 21 13:57:33 2014
@@ -25,6 +25,7 @@ import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
+import java.util.Map;
import java.util.NoSuchElementException;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -414,6 +415,33 @@ public class Hdfs extends AbstractFileSy
public AclStatus getAclStatus(Path path) throws IOException {
return dfs.getAclStatus(getUriPath(path));
}
+
+ @Override
+ public void setXAttr(Path path, String name, byte[] value,
+ EnumSet<XAttrSetFlag> flag) throws IOException {
+ dfs.setXAttr(getUriPath(path), name, value, flag);
+ }
+
+ @Override
+ public byte[] getXAttr(Path path, String name) throws IOException {
+ return dfs.getXAttr(getUriPath(path), name);
+ }
+
+ @Override
+ public Map<String, byte[]> getXAttrs(Path path) throws IOException {
+ return dfs.getXAttrs(getUriPath(path));
+ }
+
+ @Override
+ public Map<String, byte[]> getXAttrs(Path path, List<String> names)
+ throws IOException {
+ return dfs.getXAttrs(getUriPath(path), names);
+ }
+
+ @Override
+ public void removeXAttr(Path path, String name) throws IOException {
+ dfs.removeXAttr(getUriPath(path), name);
+ }
/**
* Renew an existing delegation token.
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1596575&r1=1596574&r2=1596575&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Wed May 21 13:57:33 2014
@@ -109,6 +109,8 @@ import org.apache.hadoop.fs.MD5MD5CRC32C
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
@@ -2757,6 +2759,72 @@ public class DFSClient implements java.i
UnresolvedPathException.class);
}
}
+
+ public void setXAttr(String src, String name, byte[] value,
+ EnumSet<XAttrSetFlag> flag) throws IOException {
+ checkOpen();
+ try {
+ namenode.setXAttr(src, XAttrHelper.buildXAttr(name, value), flag);
+ } catch (RemoteException re) {
+ throw re.unwrapRemoteException(AccessControlException.class,
+ FileNotFoundException.class,
+ NSQuotaExceededException.class,
+ SafeModeException.class,
+ SnapshotAccessControlException.class,
+ UnresolvedPathException.class);
+ }
+ }
+
+ public byte[] getXAttr(String src, String name) throws IOException {
+ checkOpen();
+ try {
+ final List<XAttr> xAttrs = XAttrHelper.buildXAttrAsList(name);
+ final List<XAttr> result = namenode.getXAttrs(src, xAttrs);
+ return XAttrHelper.getFirstXAttrValue(result);
+ } catch(RemoteException re) {
+ throw re.unwrapRemoteException(AccessControlException.class,
+ FileNotFoundException.class,
+ UnresolvedPathException.class);
+ }
+ }
+
+ public Map<String, byte[]> getXAttrs(String src) throws IOException {
+ checkOpen();
+ try {
+ return XAttrHelper.buildXAttrMap(namenode.getXAttrs(src, null));
+ } catch(RemoteException re) {
+ throw re.unwrapRemoteException(AccessControlException.class,
+ FileNotFoundException.class,
+ UnresolvedPathException.class);
+ }
+ }
+
+ public Map<String, byte[]> getXAttrs(String src, List<String> names)
+ throws IOException {
+ checkOpen();
+ try {
+ return XAttrHelper.buildXAttrMap(namenode.getXAttrs(
+ src, XAttrHelper.buildXAttrs(names)));
+ } catch(RemoteException re) {
+ throw re.unwrapRemoteException(AccessControlException.class,
+ FileNotFoundException.class,
+ UnresolvedPathException.class);
+ }
+ }
+
+ public void removeXAttr(String src, String name) throws IOException {
+ checkOpen();
+ try {
+ namenode.removeXAttr(src, XAttrHelper.buildXAttr(name));
+ } catch(RemoteException re) {
+ throw re.unwrapRemoteException(AccessControlException.class,
+ FileNotFoundException.class,
+ NSQuotaExceededException.class,
+ SafeModeException.class,
+ SnapshotAccessControlException.class,
+ UnresolvedPathException.class);
+ }
+ }
@Override // RemotePeerFactory
public Peer newConnectedPeer(InetSocketAddress addr) throws IOException {
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1596575&r1=1596574&r2=1596575&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Wed May 21 13:57:33 2014
@@ -192,6 +192,8 @@ public class DFSConfigKeys extends Commo
public static final String DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT = "supergroup";
public static final String DFS_NAMENODE_ACLS_ENABLED_KEY = "dfs.namenode.acls.enabled";
public static final boolean DFS_NAMENODE_ACLS_ENABLED_DEFAULT = false;
+ public static final String DFS_NAMENODE_XATTRS_ENABLED_KEY = "dfs.namenode.xattrs.enabled";
+ public static final boolean DFS_NAMENODE_XATTRS_ENABLED_DEFAULT = true;
public static final String DFS_ADMIN = "dfs.cluster.administrators";
public static final String DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY = "dfs.https.server.keystore.resource";
public static final String DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT = "ssl-server.xml";
@@ -295,6 +297,11 @@ public class DFSConfigKeys extends Commo
public static final long DFS_NAMENODE_MIN_BLOCK_SIZE_DEFAULT = 1024*1024;
public static final String DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY = "dfs.namenode.fs-limits.max-blocks-per-file";
public static final long DFS_NAMENODE_MAX_BLOCKS_PER_FILE_DEFAULT = 1024*1024;
+ public static final String DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY = "dfs.namenode.fs-limits.max-xattrs-per-inode";
+ public static final int DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT = 32;
+ public static final String DFS_NAMENODE_MAX_XATTR_SIZE_KEY = "dfs.namenode.fs-limits.max-xattr-size";
+ public static final int DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT = 16384;
+
//Following keys have no defaults
public static final String DFS_DATANODE_DATA_DIR_KEY = "dfs.datanode.data.dir";
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=1596575&r1=1596574&r2=1596575&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Wed May 21 13:57:33 2014
@@ -25,6 +25,7 @@ import java.net.URI;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
+import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -46,6 +47,7 @@ import org.apache.hadoop.fs.FsServerDefa
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
@@ -1769,4 +1771,91 @@ public class DistributedFileSystem exten
}
}.resolve(this, absF);
}
+
+ @Override
+ public void setXAttr(Path path, final String name, final byte[] value,
+ final EnumSet<XAttrSetFlag> flag) throws IOException {
+ Path absF = fixRelativePart(path);
+ new FileSystemLinkResolver<Void>() {
+
+ @Override
+ public Void doCall(final Path p) throws IOException {
+ dfs.setXAttr(getPathName(p), name, value, flag);
+ return null;
+ }
+
+ @Override
+ public Void next(final FileSystem fs, final Path p) throws IOException {
+ fs.setXAttr(p, name, value, flag);
+ return null;
+ }
+ }.resolve(this, absF);
+ }
+
+ @Override
+ public byte[] getXAttr(Path path, final String name) throws IOException {
+ final Path absF = fixRelativePart(path);
+ return new FileSystemLinkResolver<byte[]>() {
+ @Override
+ public byte[] doCall(final Path p) throws IOException {
+ return dfs.getXAttr(getPathName(p), name);
+ }
+ @Override
+ public byte[] next(final FileSystem fs, final Path p)
+ throws IOException, UnresolvedLinkException {
+ return fs.getXAttr(p, name);
+ }
+ }.resolve(this, absF);
+ }
+
+ @Override
+ public Map<String, byte[]> getXAttrs(Path path) throws IOException {
+ final Path absF = fixRelativePart(path);
+ return new FileSystemLinkResolver<Map<String, byte[]>>() {
+ @Override
+ public Map<String, byte[]> doCall(final Path p) throws IOException {
+ return dfs.getXAttrs(getPathName(p));
+ }
+ @Override
+ public Map<String, byte[]> next(final FileSystem fs, final Path p)
+ throws IOException, UnresolvedLinkException {
+ return fs.getXAttrs(p);
+ }
+ }.resolve(this, absF);
+ }
+
+ @Override
+ public Map<String, byte[]> getXAttrs(Path path, final List<String> names)
+ throws IOException {
+ final Path absF = fixRelativePart(path);
+ return new FileSystemLinkResolver<Map<String, byte[]>>() {
+ @Override
+ public Map<String, byte[]> doCall(final Path p) throws IOException {
+ return dfs.getXAttrs(getPathName(p), names);
+ }
+ @Override
+ public Map<String, byte[]> next(final FileSystem fs, final Path p)
+ throws IOException, UnresolvedLinkException {
+ return fs.getXAttrs(p, names);
+ }
+ }.resolve(this, absF);
+ }
+
+ @Override
+ public void removeXAttr(Path path, final String name) throws IOException {
+ Path absF = fixRelativePart(path);
+ new FileSystemLinkResolver<Void>() {
+ @Override
+ public Void doCall(final Path p) throws IOException {
+ dfs.removeXAttr(getPathName(p), name);
+ return null;
+ }
+
+ @Override
+ public Void next(final FileSystem fs, final Path p) throws IOException {
+ fs.removeXAttr(p, name);
+ return null;
+ }
+ }.resolve(this, absF);
+ }
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=1596575&r1=1596574&r2=1596575&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Wed May 21 13:57:33 2014
@@ -31,10 +31,12 @@ import org.apache.hadoop.fs.FileAlreadyE
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.InvalidPathException;
import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
@@ -1254,4 +1256,66 @@ public interface ClientProtocol {
*/
@Idempotent
public AclStatus getAclStatus(String src) throws IOException;
+
+ /**
+ * Set xattr of a file or directory.
+ * A regular user can only set xattrs in the "user" namespace.
+ * A super user can set xattrs in the "user" and "trusted" namespaces.
+ * XAttr of "security" and "system" namespace is only used/exposed
+ * internally to the FS impl.
+ * <p/>
+ * For xattr of "user" namespace, its access permissions are
+ * defined by the file or directory permission bits.
+ * XAttr will be set only when login user has correct permissions.
+ * <p/>
+ * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
+ * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+ * @param src file or directory
+ * @param xAttr <code>XAttr</code> to set
+ * @param flag set flag
+ * @throws IOException
+ */
+ @AtMostOnce
+ public void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
+ throws IOException;
+
+ /**
+ * Get xattrs of file or directory. Values in xAttrs parameter are ignored.
+ * If xattrs is null or empty, equals getting all xattrs of the file or
+ * directory.
+ * Only xattrs which login user has correct permissions will be returned.
+ * <p/>
+ * A regular user can only get xattrs in the "user" namespace.
+ * A super user can get xattrs in the "user" and "trusted" namespaces.
+ * XAttr of "security" and "system" namespace is only used/exposed
+ * internally to the FS impl.
+ * <p/>
+ * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
+ * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+ * @param src file or directory
+ * @param xAttrs xAttrs to get
+ * @return List<XAttr> <code>XAttr</code> list
+ * @throws IOException
+ */
+ @Idempotent
+ public List<XAttr> getXAttrs(String src, List<XAttr> xAttrs)
+ throws IOException;
+
+ /**
+ * Remove xattr of a file or directory. Value in xAttr parameter is ignored.
+ * Name must be prefixed with user/trusted/security/system.
+ * <p/>
+ * A regular user can only remove xattrs in the "user" namespace.
+ * A super user can remove xattrs in the "user" and "trusted" namespaces.
+ * XAttr of "security" and "system" namespace is only used/exposed
+ * internally to the FS impl.
+ * <p/>
+ * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
+ * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+ * @param src file or directory
+ * @param xAttr <code>XAttr</code> to remove
+ * @throws IOException
+ */
+ @Idempotent
+ public void removeXAttr(String src, XAttr xAttr) throws IOException;
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java?rev=1596575&r1=1596574&r2=1596575&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java Wed May 21 13:57:33 2014
@@ -174,6 +174,12 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrResponseProto;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.INodeId;
@@ -302,6 +308,12 @@ public class ClientNamenodeProtocolServe
private static final RemoveAclResponseProto
VOID_REMOVEACL_RESPONSE = RemoveAclResponseProto.getDefaultInstance();
+
+ private static final SetXAttrResponseProto
+ VOID_SETXATTR_RESPONSE = SetXAttrResponseProto.getDefaultInstance();
+
+ private static final RemoveXAttrResponseProto
+ VOID_REMOVEXATTR_RESPONSE = RemoveXAttrResponseProto.getDefaultInstance();
/**
* Constructor
@@ -1262,4 +1274,38 @@ public class ClientNamenodeProtocolServe
throw new ServiceException(e);
}
}
+
+ @Override
+ public SetXAttrResponseProto setXAttr(RpcController controller,
+ SetXAttrRequestProto req) throws ServiceException {
+ try {
+ server.setXAttr(req.getSrc(), PBHelper.convertXAttr(req.getXAttr()),
+ PBHelper.convert(req.getFlag()));
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ return VOID_SETXATTR_RESPONSE;
+ }
+
+ @Override
+ public GetXAttrsResponseProto getXAttrs(RpcController controller,
+ GetXAttrsRequestProto req) throws ServiceException {
+ try {
+ return PBHelper.convertXAttrsResponse(server.getXAttrs(req.getSrc(),
+ PBHelper.convertXAttrs(req.getXAttrsList())));
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public RemoveXAttrResponseProto removeXAttr(RpcController controller,
+ RemoveXAttrRequestProto req) throws ServiceException {
+ try {
+ server.removeXAttr(req.getSrc(), PBHelper.convertXAttr(req.getXAttr()));
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ return VOID_REMOVEXATTR_RESPONSE;
+ }
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java?rev=1596575&r1=1596574&r2=1596575&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java Wed May 21 13:57:33 2014
@@ -35,6 +35,8 @@ import org.apache.hadoop.fs.FsServerDefa
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
@@ -141,6 +143,9 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrRequestProto;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
@@ -1268,4 +1273,47 @@ public class ClientNamenodeProtocolTrans
throw ProtobufHelper.getRemoteException(e);
}
}
+
+ @Override
+ public void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
+ throws IOException {
+ SetXAttrRequestProto req = SetXAttrRequestProto.newBuilder()
+ .setSrc(src)
+ .setXAttr(PBHelper.convertXAttrProto(xAttr))
+ .setFlag(PBHelper.convert(flag))
+ .build();
+ try {
+ rpcProxy.setXAttr(null, req);
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ }
+
+ @Override
+ public List<XAttr> getXAttrs(String src, List<XAttr> xAttrs)
+ throws IOException {
+ GetXAttrsRequestProto.Builder builder = GetXAttrsRequestProto.newBuilder();
+ builder.setSrc(src);
+ if (xAttrs != null) {
+ builder.addAllXAttrs(PBHelper.convertXAttrProto(xAttrs));
+ }
+ GetXAttrsRequestProto req = builder.build();
+ try {
+ return PBHelper.convert(rpcProxy.getXAttrs(null, req));
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ }
+
+ @Override
+ public void removeXAttr(String src, XAttr xAttr) throws IOException {
+ RemoveXAttrRequestProto req = RemoveXAttrRequestProto
+ .newBuilder().setSrc(src)
+ .setXAttr(PBHelper.convertXAttrProto(xAttr)).build();
+ try {
+ rpcProxy.removeXAttr(null, req);
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ }
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java?rev=1596575&r1=1596574&r2=1596575&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java Wed May 21 13:57:33 2014
@@ -32,6 +32,8 @@ import org.apache.hadoop.fs.ContentSumma
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
@@ -150,6 +152,10 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto.XAttrNamespaceProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrSetFlagProto;
import org.apache.hadoop.hdfs.security.token.block.BlockKey;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
@@ -221,6 +227,8 @@ public class PBHelper {
AclEntryType.values();
private static final FsAction[] FSACTION_VALUES =
FsAction.values();
+ private static final XAttr.NameSpace[] XATTR_NAMESPACE_VALUES =
+ XAttr.NameSpace.values();
private PBHelper() {
/** Hidden constructor */
@@ -2007,6 +2015,14 @@ public class PBHelper {
private static AclEntryType convert(AclEntryTypeProto v) {
return castEnum(v, ACL_ENTRY_TYPE_VALUES);
}
+
+ private static XAttrNamespaceProto convert(XAttr.NameSpace v) {
+ return XAttrNamespaceProto.valueOf(v.ordinal());
+ }
+
+ private static XAttr.NameSpace convert(XAttrNamespaceProto v) {
+ return castEnum(v, XATTR_NAMESPACE_VALUES);
+ }
private static FsActionProto convert(FsAction v) {
return FsActionProto.valueOf(v != null ? v.ordinal() : 0);
@@ -2060,6 +2076,108 @@ public class PBHelper {
.addAllEntries(convertAclEntryProto(e.getEntries())).build();
return GetAclStatusResponseProto.newBuilder().setResult(r).build();
}
+
+ public static XAttrProto convertXAttrProto(XAttr a) {
+ XAttrProto.Builder builder = XAttrProto.newBuilder();
+ builder.setNamespace(convert(a.getNameSpace()));
+ if (a.getName() != null) {
+ builder.setName(a.getName());
+ }
+ if (a.getValue() != null) {
+ builder.setValue(getByteString(a.getValue()));
+ }
+ return builder.build();
+ }
+
+ public static List<XAttrProto> convertXAttrProto(
+ List<XAttr> xAttrSpec) {
+ ArrayList<XAttrProto> xAttrs = Lists.newArrayListWithCapacity(
+ xAttrSpec.size());
+ for (XAttr a : xAttrSpec) {
+ XAttrProto.Builder builder = XAttrProto.newBuilder();
+ builder.setNamespace(convert(a.getNameSpace()));
+ if (a.getName() != null) {
+ builder.setName(a.getName());
+ }
+ if (a.getValue() != null) {
+ builder.setValue(getByteString(a.getValue()));
+ }
+ xAttrs.add(builder.build());
+ }
+ return xAttrs;
+ }
+
+ /**
+ * The flag field in PB is a bitmask whose values are the same as the
+ * enum values of XAttrSetFlag
+ */
+ public static int convert(EnumSet<XAttrSetFlag> flag) {
+ int value = 0;
+ if (flag.contains(XAttrSetFlag.CREATE)) {
+ value |= XAttrSetFlagProto.XATTR_CREATE.getNumber();
+ }
+ if (flag.contains(XAttrSetFlag.REPLACE)) {
+ value |= XAttrSetFlagProto.XATTR_REPLACE.getNumber();
+ }
+ return value;
+ }
+
+ public static EnumSet<XAttrSetFlag> convert(int flag) {
+ EnumSet<XAttrSetFlag> result =
+ EnumSet.noneOf(XAttrSetFlag.class);
+ if ((flag & XAttrSetFlagProto.XATTR_CREATE_VALUE) ==
+ XAttrSetFlagProto.XATTR_CREATE_VALUE) {
+ result.add(XAttrSetFlag.CREATE);
+ }
+ if ((flag & XAttrSetFlagProto.XATTR_REPLACE_VALUE) ==
+ XAttrSetFlagProto.XATTR_REPLACE_VALUE) {
+ result.add(XAttrSetFlag.REPLACE);
+ }
+ return result;
+ }
+
+ public static XAttr convertXAttr(XAttrProto a) {
+ XAttr.Builder builder = new XAttr.Builder();
+ builder.setNameSpace(convert(a.getNamespace()));
+ if (a.hasName()) {
+ builder.setName(a.getName());
+ }
+ if (a.hasValue()) {
+ builder.setValue(a.getValue().toByteArray());
+ }
+ return builder.build();
+ }
+
+ public static List<XAttr> convertXAttrs(List<XAttrProto> xAttrSpec) {
+ ArrayList<XAttr> xAttrs = Lists.newArrayListWithCapacity(xAttrSpec.size());
+ for (XAttrProto a : xAttrSpec) {
+ XAttr.Builder builder = new XAttr.Builder();
+ builder.setNameSpace(convert(a.getNamespace()));
+ if (a.hasName()) {
+ builder.setName(a.getName());
+ }
+ if (a.hasValue()) {
+ builder.setValue(a.getValue().toByteArray());
+ }
+ xAttrs.add(builder.build());
+ }
+ return xAttrs;
+ }
+
+ public static List<XAttr> convert(GetXAttrsResponseProto a) {
+ List<XAttrProto> xAttrs = a.getXAttrsList();
+ return convertXAttrs(xAttrs);
+ }
+
+ public static GetXAttrsResponseProto convertXAttrsResponse(
+ List<XAttr> xAttrs) {
+ GetXAttrsResponseProto.Builder builder = GetXAttrsResponseProto
+ .newBuilder();
+ if (xAttrs != null) {
+ builder.addAllXAttrs(convertXAttrProto(xAttrs));
+ }
+ return builder.build();
+ }
public static ShortCircuitShmSlotProto convert(SlotId slotId) {
return ShortCircuitShmSlotProto.newBuilder().
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1596575&r1=1596574&r2=1596575&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Wed May 21 13:57:33 2014
@@ -24,6 +24,7 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.EnumSet;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
@@ -39,6 +40,8 @@ import org.apache.hadoop.fs.ParentNotDir
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathIsNotDirectoryException;
import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
@@ -47,6 +50,7 @@ import org.apache.hadoop.fs.permission.P
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.protocol.AclException;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -79,6 +83,7 @@ import org.apache.hadoop.hdfs.util.ReadO
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
/*************************************************
* FSDirectory stores the filesystem directory state.
@@ -125,6 +130,7 @@ public class FSDirectory implements Clos
private final int contentCountLimit; // max content summary counts per run
private final INodeMap inodeMap; // Synchronized by dirLock
private long yieldCount = 0; // keep track of lock yield count.
+ private final int inodeXAttrsLimit; //inode xattrs max limit
// lock to protect the directory and BlockMap
private final ReentrantReadWriteLock dirLock;
@@ -190,6 +196,12 @@ public class FSDirectory implements Clos
this.maxDirItems = conf.getInt(
DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY,
DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_DEFAULT);
+ this.inodeXAttrsLimit = conf.getInt(
+ DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY,
+ DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT);
+ Preconditions.checkArgument(this.inodeXAttrsLimit >= 0,
+ "Cannot set a negative limit on the number of xattrs per inode (%s).",
+ DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY);
// We need a maximum maximum because by default, PB limits message sizes
// to 64MB. This means we can only store approximately 6.7 million entries
// per directory, but let's use 6.4 million for some safety.
@@ -2856,6 +2868,116 @@ public class FSDirectory implements Clos
readUnlock();
}
}
+
+ void removeXAttr(String src, XAttr xAttr) throws IOException {
+ writeLock();
+ try {
+ XAttr removedXAttr = unprotectedRemoveXAttr(src, xAttr);
+ if (removedXAttr != null) {
+ fsImage.getEditLog().logRemoveXAttr(src, removedXAttr);
+ } else {
+ NameNode.stateChangeLog.info("DIR* FSDirectory.removeXAttr: XAttr " +
+ XAttrHelper.getPrefixName(xAttr) +
+ " does not exist on the path " + src);
+ }
+ } finally {
+ writeUnlock();
+ }
+ }
+
+ XAttr unprotectedRemoveXAttr(String src,
+ XAttr xAttr) throws IOException {
+ assert hasWriteLock();
+ INodesInPath iip = getINodesInPath4Write(normalizePath(src), true);
+ INode inode = resolveLastINode(src, iip);
+ int snapshotId = iip.getLatestSnapshotId();
+ List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);
+ List<XAttr> newXAttrs = filterINodeXAttr(existingXAttrs, xAttr);
+ if (existingXAttrs.size() != newXAttrs.size()) {
+ XAttrStorage.updateINodeXAttrs(inode, newXAttrs, snapshotId);
+ return xAttr;
+ }
+ return null;
+ }
+
+ List<XAttr> filterINodeXAttr(List<XAttr> existingXAttrs,
+ XAttr xAttr) throws QuotaExceededException {
+ if (existingXAttrs == null || existingXAttrs.isEmpty()) {
+ return existingXAttrs;
+ }
+
+ List<XAttr> xAttrs = Lists.newArrayListWithCapacity(existingXAttrs.size());
+ for (XAttr a : existingXAttrs) {
+ if (!(a.getNameSpace() == xAttr.getNameSpace()
+ && a.getName().equals(xAttr.getName()))) {
+ xAttrs.add(a);
+ }
+ }
+
+ return xAttrs;
+ }
+
+ void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag,
+ boolean logRetryCache) throws IOException {
+ writeLock();
+ try {
+ unprotectedSetXAttr(src, xAttr, flag);
+ fsImage.getEditLog().logSetXAttr(src, xAttr, logRetryCache);
+ } finally {
+ writeUnlock();
+ }
+ }
+
+ void unprotectedSetXAttr(String src, XAttr xAttr,
+ EnumSet<XAttrSetFlag> flag) throws IOException {
+ assert hasWriteLock();
+ INodesInPath iip = getINodesInPath4Write(normalizePath(src), true);
+ INode inode = resolveLastINode(src, iip);
+ int snapshotId = iip.getLatestSnapshotId();
+ List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);
+ List<XAttr> newXAttrs = setINodeXAttr(existingXAttrs, xAttr, flag);
+ XAttrStorage.updateINodeXAttrs(inode, newXAttrs, snapshotId);
+ }
+
+ List<XAttr> setINodeXAttr(List<XAttr> existingXAttrs, XAttr xAttr,
+ EnumSet<XAttrSetFlag> flag) throws QuotaExceededException, IOException {
+ List<XAttr> xAttrs = Lists.newArrayListWithCapacity(
+ existingXAttrs != null ? existingXAttrs.size() + 1 : 1);
+ boolean exist = false;
+ if (existingXAttrs != null) {
+ for (XAttr a: existingXAttrs) {
+ if ((a.getNameSpace() == xAttr.getNameSpace()
+ && a.getName().equals(xAttr.getName()))) {
+ exist = true;
+ } else {
+ xAttrs.add(a);
+ }
+ }
+ }
+
+ XAttrSetFlag.validate(xAttr.getName(), exist, flag);
+ xAttrs.add(xAttr);
+
+ if (xAttrs.size() > inodeXAttrsLimit) {
+ throw new IOException("Cannot add additional XAttr to inode, "
+ + "would exceed limit of " + inodeXAttrsLimit);
+ }
+
+ return xAttrs;
+ }
+
+ List<XAttr> getXAttrs(String src) throws IOException {
+ String srcs = normalizePath(src);
+ readLock();
+ try {
+ INodesInPath iip = getLastINodeInPath(srcs, true);
+ INode inode = resolveLastINode(src, iip);
+ int snapshotId = iip.getPathSnapshotId();
+ return XAttrStorage.readINodeXAttrs(inode, snapshotId);
+ } finally {
+ readUnlock();
+ }
+ }
private static INode resolveLastINode(String src, INodesInPath iip)
throws FileNotFoundException {
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1596575&r1=1596574&r2=1596575&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Wed May 21 13:57:33 2014
@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
@@ -69,6 +70,7 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ReassignLeaseOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCacheDirectiveInfoOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCachePoolOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveXAttrOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp;
@@ -80,6 +82,7 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetPermissionsOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetQuotaOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetReplicationOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetXAttrOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SymlinkOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp;
@@ -1050,6 +1053,21 @@ public class FSEditLog implements LogsPu
op.aclEntries = entries;
logEdit(op);
}
+
+ void logSetXAttr(String src, XAttr xAttr, boolean toLogRpcIds) {
+ final SetXAttrOp op = SetXAttrOp.getInstance();
+ op.src = src;
+ op.xAttr = xAttr;
+ logRpcIds(op, toLogRpcIds);
+ logEdit(op);
+ }
+
+ void logRemoveXAttr(String src, XAttr xAttr) {
+ final RemoveXAttrOp op = RemoveXAttrOp.getInstance();
+ op.src = src;
+ op.xAttr = xAttr;
+ logEdit(op);
+ }
/**
* Get all the journals this edit log is currently operating on.
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1596575&r1=1596574&r2=1596575&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Wed May 21 13:57:33 2014
@@ -25,12 +25,14 @@ import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;
import java.util.EnumMap;
+import java.util.EnumSet;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -76,6 +78,8 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetPermissionsOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetQuotaOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetReplicationOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetXAttrOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveXAttrOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SymlinkOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp;
@@ -798,6 +802,20 @@ public class FSEditLogLoader {
fsDir.unprotectedSetAcl(setAclOp.src, setAclOp.aclEntries);
break;
}
+ case OP_SET_XATTR: {
+ SetXAttrOp setXAttrOp = (SetXAttrOp) op;
+ fsDir.unprotectedSetXAttr(setXAttrOp.src, setXAttrOp.xAttr,
+ EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
+ if (toAddRetryCache) {
+ fsNamesys.addCacheEntry(setXAttrOp.rpcClientId, setXAttrOp.rpcCallId);
+ }
+ break;
+ }
+ case OP_REMOVE_XATTR: {
+ RemoveXAttrOp removeXAttrOp = (RemoveXAttrOp) op;
+ fsDir.unprotectedRemoveXAttr(removeXAttrOp.src, removeXAttrOp.xAttr);
+ break;
+ }
default:
throw new IOException("Invalid operation read " + op.opCode);
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java?rev=1596575&r1=1596574&r2=1596575&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java Wed May 21 13:57:33 2014
@@ -54,6 +54,8 @@ import static org.apache.hadoop.hdfs.ser
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_PERMISSIONS;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_QUOTA;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_REPLICATION;
+import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_XATTR;
+import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_REMOVE_XATTR;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_START_LOG_SEGMENT;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SYMLINK;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_TIMES;
@@ -79,12 +81,14 @@ import org.apache.hadoop.classification.
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.Options.Rename;
+import org.apache.hadoop.fs.XAttrCodec;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DeprecatedUTF8;
import org.apache.hadoop.hdfs.protocol.Block;
@@ -95,6 +99,7 @@ import org.apache.hadoop.hdfs.protocol.H
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEditLogProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrEditLogProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.util.XMLUtils;
@@ -186,6 +191,8 @@ public abstract class FSEditLogOp {
OP_ROLLING_UPGRADE_START, "start"));
inst.put(OP_ROLLING_UPGRADE_FINALIZE, new RollingUpgradeOp(
OP_ROLLING_UPGRADE_FINALIZE, "finalize"));
+ inst.put(OP_SET_XATTR, new SetXAttrOp());
+ inst.put(OP_REMOVE_XATTR, new RemoveXAttrOp());
}
public FSEditLogOp get(FSEditLogOpCodes opcode) {
@@ -3490,6 +3497,95 @@ public abstract class FSEditLogOp {
return builder.toString();
}
}
+
+ static class RemoveXAttrOp extends FSEditLogOp {
+ XAttr xAttr;
+ String src;
+
+ private RemoveXAttrOp() {
+ super(OP_REMOVE_XATTR);
+ }
+
+ static RemoveXAttrOp getInstance() {
+ return new RemoveXAttrOp();
+ }
+
+ @Override
+ void readFields(DataInputStream in, int logVersion) throws IOException {
+ XAttrEditLogProto p = XAttrEditLogProto.parseDelimitedFrom(in);
+ src = p.getSrc();
+ xAttr = PBHelper.convertXAttr(p.getXAttr());
+ }
+
+ @Override
+ public void writeFields(DataOutputStream out) throws IOException {
+ XAttrEditLogProto.Builder b = XAttrEditLogProto.newBuilder();
+ if (src != null) {
+ b.setSrc(src);
+ }
+ b.setXAttr(PBHelper.convertXAttrProto(xAttr));
+ b.build().writeDelimitedTo(out);
+ }
+
+ @Override
+ protected void toXml(ContentHandler contentHandler) throws SAXException {
+ XMLUtils.addSaxString(contentHandler, "SRC", src);
+ appendXAttrToXml(contentHandler, xAttr);
+ }
+
+ @Override
+ void fromXml(Stanza st) throws InvalidXmlException {
+ src = st.getValue("SRC");
+ xAttr = readXAttrFromXml(st);
+ }
+ }
+
+ static class SetXAttrOp extends FSEditLogOp {
+ XAttr xAttr;
+ String src;
+
+ private SetXAttrOp() {
+ super(OP_SET_XATTR);
+ }
+
+ static SetXAttrOp getInstance() {
+ return new SetXAttrOp();
+ }
+
+ @Override
+ void readFields(DataInputStream in, int logVersion) throws IOException {
+ XAttrEditLogProto p = XAttrEditLogProto.parseDelimitedFrom(in);
+ src = p.getSrc();
+ xAttr = PBHelper.convertXAttr(p.getXAttr());
+ readRpcIds(in, logVersion);
+ }
+
+ @Override
+ public void writeFields(DataOutputStream out) throws IOException {
+ XAttrEditLogProto.Builder b = XAttrEditLogProto.newBuilder();
+ if (src != null) {
+ b.setSrc(src);
+ }
+ b.setXAttr(PBHelper.convertXAttrProto(xAttr));
+ b.build().writeDelimitedTo(out);
+ // clientId and callId
+ writeRpcIds(rpcClientId, rpcCallId, out);
+ }
+
+ @Override
+ protected void toXml(ContentHandler contentHandler) throws SAXException {
+ XMLUtils.addSaxString(contentHandler, "SRC", src);
+ appendXAttrToXml(contentHandler, xAttr);
+ appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
+ }
+
+ @Override
+ void fromXml(Stanza st) throws InvalidXmlException {
+ src = st.getValue("SRC");
+ xAttr = readXAttrFromXml(st);
+ readRpcIdsFromXml(st);
+ }
+ }
static class SetAclOp extends FSEditLogOp {
List<AclEntry> aclEntries = Lists.newArrayList();
@@ -4106,4 +4202,42 @@ public abstract class FSEditLogOp {
}
return aclEntries;
}
+
+ private static void appendXAttrToXml(ContentHandler contentHandler,
+ XAttr xAttr) throws SAXException {
+ contentHandler.startElement("", "", "XATTR", new AttributesImpl());
+ XMLUtils.addSaxString(contentHandler, "NAMESPACE",
+ xAttr.getNameSpace().toString());
+ XMLUtils.addSaxString(contentHandler, "NAME", xAttr.getName());
+ if (xAttr.getValue() != null) {
+ try {
+ XMLUtils.addSaxString(contentHandler, "VALUE",
+ XAttrCodec.encodeValue(xAttr.getValue(), XAttrCodec.HEX));
+ } catch (IOException e) {
+ throw new SAXException(e);
+ }
+ }
+ contentHandler.endElement("", "", "XATTR");
+ }
+
+ private static XAttr readXAttrFromXml(Stanza st)
+ throws InvalidXmlException {
+ if (!st.hasChildren("XATTR")) {
+ return null;
+ }
+
+ Stanza a = st.getChildren("XATTR").get(0);
+ XAttr.Builder builder = new XAttr.Builder();
+ builder.setNameSpace(XAttr.NameSpace.valueOf(a.getValue("NAMESPACE"))).
+ setName(a.getValue("NAME"));
+ String v = a.getValueOrNull("VALUE");
+ if (v != null) {
+ try {
+ builder.setValue(XAttrCodec.decodeValue(v));
+ } catch (IOException e) {
+ throw new InvalidXmlException(e.toString());
+ }
+ }
+ return builder.build();
+ }
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java?rev=1596575&r1=1596574&r2=1596575&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java Wed May 21 13:57:33 2014
@@ -70,6 +70,8 @@ public enum FSEditLogOpCodes {
OP_SET_ACL ((byte) 40),
OP_ROLLING_UPGRADE_START ((byte) 41),
OP_ROLLING_UPGRADE_FINALIZE ((byte) 42),
+ OP_SET_XATTR ((byte) 43),
+ OP_REMOVE_XATTR ((byte) 44),
// Note that the current range of the valid OP code is 0~127
OP_INVALID ((byte) -1);
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java?rev=1596575&r1=1596574&r2=1596575&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java Wed May 21 13:57:33 2014
@@ -877,7 +877,7 @@ public class FSImageFormat {
final long preferredBlockSize = in.readLong();
return new INodeFileAttributes.SnapshotCopy(name, permissions, null, modificationTime,
- accessTime, replication, preferredBlockSize);
+ accessTime, replication, preferredBlockSize, null);
}
public INodeDirectoryAttributes loadINodeDirectoryAttributes(DataInput in)
@@ -897,10 +897,10 @@ public class FSImageFormat {
final long nsQuota = in.readLong();
final long dsQuota = in.readLong();
- return nsQuota == -1L && dsQuota == -1L?
- new INodeDirectoryAttributes.SnapshotCopy(name, permissions, null, modificationTime)
+ return nsQuota == -1L && dsQuota == -1L ? new INodeDirectoryAttributes.SnapshotCopy(
+ name, permissions, null, modificationTime, null)
: new INodeDirectoryAttributes.CopyWithQuota(name, permissions,
- null, modificationTime, nsQuota, dsQuota);
+ null, modificationTime, nsQuota, dsQuota, null);
}
private void loadFilesUnderConstruction(DataInput in,
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java?rev=1596575&r1=1596574&r2=1596575&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java Wed May 21 13:57:33 2014
@@ -36,6 +36,7 @@ import org.apache.hadoop.fs.permission.A
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
@@ -49,7 +50,10 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import com.google.common.base.Preconditions;
@@ -74,6 +78,14 @@ public final class FSImageFormatPBINode
.values();
private static final AclEntryType[] ACL_ENTRY_TYPE_VALUES = AclEntryType
.values();
+
+ private static final int XATTR_NAMESPACE_MASK = 3;
+ private static final int XATTR_NAMESPACE_OFFSET = 30;
+ private static final int XATTR_NAME_MASK = (1 << 24) - 1;
+ private static final int XATTR_NAME_OFFSET = 6;
+ private static final XAttr.NameSpace[] XATTR_NAMESPACE_VALUES =
+ XAttr.NameSpace.values();
+
private static final Log LOG = LogFactory.getLog(FSImageFormatPBINode.class);
@@ -103,6 +115,25 @@ public final class FSImageFormatPBINode
}
return b.build();
}
+
+ public static ImmutableList<XAttr> loadXAttrs(
+ XAttrFeatureProto proto, final String[] stringTable) {
+ ImmutableList.Builder<XAttr> b = ImmutableList.builder();
+ for (XAttrCompactProto xAttrCompactProto : proto.getXAttrsList()) {
+ int v = xAttrCompactProto.getName();
+ int nid = (v >> XATTR_NAME_OFFSET) & XATTR_NAME_MASK;
+ int ns = (v >> XATTR_NAMESPACE_OFFSET) & XATTR_NAMESPACE_MASK;
+ String name = stringTable[nid];
+ byte[] value = null;
+ if (xAttrCompactProto.getValue() != null) {
+ value = xAttrCompactProto.getValue().toByteArray();
+ }
+ b.add(new XAttr.Builder().setNameSpace(XATTR_NAMESPACE_VALUES[ns])
+ .setName(name).setValue(value).build());
+ }
+
+ return b.build();
+ }
public static INodeDirectory loadINodeDirectory(INodeSection.INode n,
LoaderContext state) {
@@ -123,6 +154,10 @@ public final class FSImageFormatPBINode
dir.addAclFeature(new AclFeature(loadAclEntries(d.getAcl(),
state.getStringTable())));
}
+ if (d.hasXAttrs()) {
+ dir.addXAttrFeature(new XAttrFeature(
+ loadXAttrs(d.getXAttrs(), state.getStringTable())));
+ }
return dir;
}
@@ -255,6 +290,11 @@ public final class FSImageFormatPBINode
file.addAclFeature(new AclFeature(loadAclEntries(f.getAcl(),
state.getStringTable())));
}
+
+ if (f.hasXAttrs()) {
+ file.addXAttrFeature(new XAttrFeature(
+ loadXAttrs(f.getXAttrs(), state.getStringTable())));
+ }
// under-construction information
if (f.hasFileUC()) {
@@ -295,6 +335,11 @@ public final class FSImageFormatPBINode
}
dir.rootDir.cloneModificationTime(root);
dir.rootDir.clonePermissionStatus(root);
+ // root dir supports having extended attributes according to POSIX
+ final XAttrFeature f = root.getXAttrFeature();
+ if (f != null) {
+ dir.rootDir.addXAttrFeature(f);
+ }
}
}
@@ -320,6 +365,26 @@ public final class FSImageFormatPBINode
}
return b;
}
+
+ private static XAttrFeatureProto.Builder buildXAttrs(XAttrFeature f,
+ final SaverContext.DeduplicationMap<String> stringMap) {
+ XAttrFeatureProto.Builder b = XAttrFeatureProto.newBuilder();
+ for (XAttr a : f.getXAttrs()) {
+ XAttrCompactProto.Builder xAttrCompactBuilder = XAttrCompactProto.
+ newBuilder();
+ int v = ((a.getNameSpace().ordinal() & XATTR_NAMESPACE_MASK) <<
+ XATTR_NAMESPACE_OFFSET)
+ | ((stringMap.getId(a.getName()) & XATTR_NAME_MASK) <<
+ XATTR_NAME_OFFSET);
+ xAttrCompactBuilder.setName(v);
+ if (a.getValue() != null) {
+ xAttrCompactBuilder.setValue(PBHelper.getByteString(a.getValue()));
+ }
+ b.addXAttrs(xAttrCompactBuilder.build());
+ }
+
+ return b;
+ }
public static INodeSection.INodeFile.Builder buildINodeFile(
INodeFileAttributes file, final SaverContext state) {
@@ -334,6 +399,10 @@ public final class FSImageFormatPBINode
if (f != null) {
b.setAcl(buildAclEntries(f, state.getStringMap()));
}
+ XAttrFeature xAttrFeature = file.getXAttrFeature();
+ if (xAttrFeature != null) {
+ b.setXAttrs(buildXAttrs(xAttrFeature, state.getStringMap()));
+ }
return b;
}
@@ -350,6 +419,10 @@ public final class FSImageFormatPBINode
if (f != null) {
b.setAcl(buildAclEntries(f, state.getStringMap()));
}
+ XAttrFeature xAttrFeature = dir.getXAttrFeature();
+ if (xAttrFeature != null) {
+ b.setXAttrs(buildXAttrs(xAttrFeature, state.getStringMap()));
+ }
return b;
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1596575&r1=1596574&r2=1596575&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Wed May 21 13:57:33 2014
@@ -131,6 +131,8 @@ import org.apache.hadoop.fs.ParentNotDir
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
@@ -508,7 +510,7 @@ public class FSNamesystem implements Nam
private final RetryCache retryCache;
- private final AclConfigFlag aclConfigFlag;
+ private final NNConf nnConf;
/**
* Set the last allocated inode id when fsimage or editlog is loaded.
@@ -775,7 +777,7 @@ public class FSNamesystem implements Nam
this.isDefaultAuditLogger = auditLoggers.size() == 1 &&
auditLoggers.get(0) instanceof DefaultAuditLogger;
this.retryCache = ignoreRetryCache ? null : initRetryCache(conf);
- this.aclConfigFlag = new AclConfigFlag(conf);
+ this.nnConf = new NNConf(conf);
} catch(IOException e) {
LOG.error(getClass().getSimpleName() + " initialization failed.", e);
close();
@@ -1112,8 +1114,10 @@ public class FSNamesystem implements Nam
// so that the tailer starts from the right spot.
dir.fsImage.updateLastAppliedTxIdFromWritten();
}
- cacheManager.stopMonitorThread();
- cacheManager.clearDirectiveStats();
+ if (cacheManager != null) {
+ cacheManager.stopMonitorThread();
+ cacheManager.clearDirectiveStats();
+ }
blockManager.getDatanodeManager().clearPendingCachingCommands();
blockManager.getDatanodeManager().setShouldSendCachingCommands(false);
// Don't want to keep replication queues when not in Active.
@@ -7694,7 +7698,7 @@ public class FSNamesystem implements Nam
}
void modifyAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
- aclConfigFlag.checkForApiCall();
+ nnConf.checkAclsConfigFlag();
HdfsFileStatus resultingStat = null;
FSPermissionChecker pc = getPermissionChecker();
checkOperation(OperationCategory.WRITE);
@@ -7715,7 +7719,7 @@ public class FSNamesystem implements Nam
}
void removeAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
- aclConfigFlag.checkForApiCall();
+ nnConf.checkAclsConfigFlag();
HdfsFileStatus resultingStat = null;
FSPermissionChecker pc = getPermissionChecker();
checkOperation(OperationCategory.WRITE);
@@ -7736,7 +7740,7 @@ public class FSNamesystem implements Nam
}
void removeDefaultAcl(String src) throws IOException {
- aclConfigFlag.checkForApiCall();
+ nnConf.checkAclsConfigFlag();
HdfsFileStatus resultingStat = null;
FSPermissionChecker pc = getPermissionChecker();
checkOperation(OperationCategory.WRITE);
@@ -7757,7 +7761,7 @@ public class FSNamesystem implements Nam
}
void removeAcl(String src) throws IOException {
- aclConfigFlag.checkForApiCall();
+ nnConf.checkAclsConfigFlag();
HdfsFileStatus resultingStat = null;
FSPermissionChecker pc = getPermissionChecker();
checkOperation(OperationCategory.WRITE);
@@ -7778,7 +7782,7 @@ public class FSNamesystem implements Nam
}
void setAcl(String src, List<AclEntry> aclSpec) throws IOException {
- aclConfigFlag.checkForApiCall();
+ nnConf.checkAclsConfigFlag();
HdfsFileStatus resultingStat = null;
FSPermissionChecker pc = getPermissionChecker();
checkOperation(OperationCategory.WRITE);
@@ -7799,7 +7803,7 @@ public class FSNamesystem implements Nam
}
AclStatus getAclStatus(String src) throws IOException {
- aclConfigFlag.checkForApiCall();
+ nnConf.checkAclsConfigFlag();
FSPermissionChecker pc = getPermissionChecker();
checkOperation(OperationCategory.READ);
readLock();
@@ -7813,6 +7817,167 @@ public class FSNamesystem implements Nam
readUnlock();
}
}
+
+ /**
+ * Set xattr for a file or directory.
+ *
+ * @param src
+ * - path on which it sets the xattr
+ * @param xAttr
+ * - xAttr details to set
+ * @param flag
+ * - xAttrs flags
+ * @throws AccessControlException
+ * @throws SafeModeException
+ * @throws UnresolvedLinkException
+ * @throws IOException
+ */
+ void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
+ throws AccessControlException, SafeModeException,
+ UnresolvedLinkException, IOException {
+ CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
+ if (cacheEntry != null && cacheEntry.isSuccess()) {
+ return; // Return previous response
+ }
+ boolean success = false;
+ try {
+ setXAttrInt(src, xAttr, flag, cacheEntry != null);
+ success = true;
+ } catch (AccessControlException e) {
+ logAuditEvent(false, "setXAttr", src);
+ throw e;
+ } finally {
+ RetryCache.setState(cacheEntry, success);
+ }
+ }
+
+ private void setXAttrInt(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag,
+ boolean logRetryCache) throws IOException {
+ nnConf.checkXAttrsConfigFlag();
+ checkXAttrSize(xAttr);
+ HdfsFileStatus resultingStat = null;
+ FSPermissionChecker pc = getPermissionChecker();
+ XAttrPermissionFilter.checkPermissionForApi(pc, xAttr);
+ checkOperation(OperationCategory.WRITE);
+ byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
+ writeLock();
+ try {
+ checkOperation(OperationCategory.WRITE);
+ checkNameNodeSafeMode("Cannot set XAttr on " + src);
+ src = FSDirectory.resolvePath(src, pathComponents, dir);
+ if (isPermissionEnabled) {
+ checkOwner(pc, src);
+ checkPathAccess(pc, src, FsAction.WRITE);
+ }
+ dir.setXAttr(src, xAttr, flag, logRetryCache);
+ resultingStat = getAuditFileInfo(src, false);
+ } finally {
+ writeUnlock();
+ }
+ getEditLog().logSync();
+ logAuditEvent(true, "setXAttr", src, null, resultingStat);
+ }
+
+ /**
+ * Verifies that the combined size of the name and value of an xattr is within
+ * the configured limit. Setting a limit of zero disables this check.
+ */
+ private void checkXAttrSize(XAttr xAttr) {
+ if (nnConf.xattrMaxSize == 0) {
+ return;
+ }
+ int size = xAttr.getName().getBytes(Charsets.UTF_8).length;
+ if (xAttr.getValue() != null) {
+ size += xAttr.getValue().length;
+ }
+ if (size > nnConf.xattrMaxSize) {
+ throw new HadoopIllegalArgumentException(
+ "The XAttr is too big. The maximum combined size of the"
+ + " name and value is " + nnConf.xattrMaxSize
+ + ", but the total size is " + size);
+ }
+ }
+
+ List<XAttr> getXAttrs(String src, List<XAttr> xAttrs) throws IOException {
+ nnConf.checkXAttrsConfigFlag();
+ FSPermissionChecker pc = getPermissionChecker();
+ boolean getAll = xAttrs == null || xAttrs.isEmpty();
+ List<XAttr> filteredXAttrs = null;
+ if (!getAll) {
+ filteredXAttrs = XAttrPermissionFilter.filterXAttrsForApi(pc, xAttrs);
+ if (filteredXAttrs.isEmpty()) {
+ return filteredXAttrs;
+ }
+ }
+ checkOperation(OperationCategory.READ);
+ readLock();
+ try {
+ checkOperation(OperationCategory.READ);
+ if (isPermissionEnabled) {
+ checkPathAccess(pc, src, FsAction.READ);
+ }
+ List<XAttr> all = dir.getXAttrs(src);
+ List<XAttr> filteredAll = XAttrPermissionFilter.
+ filterXAttrsForApi(pc, all);
+ if (getAll) {
+ return filteredAll;
+ } else {
+ if (filteredAll == null || filteredAll.isEmpty()) {
+ return null;
+ }
+ List<XAttr> toGet = Lists.newArrayListWithCapacity(filteredXAttrs.size());
+ for (XAttr xAttr : filteredXAttrs) {
+ for (XAttr a : filteredAll) {
+ if (xAttr.getNameSpace() == a.getNameSpace()
+ && xAttr.getName().equals(a.getName())) {
+ toGet.add(a);
+ break;
+ }
+ }
+ }
+ return toGet;
+ }
+ } catch (AccessControlException e) {
+ logAuditEvent(false, "getXAttrs", src);
+ throw e;
+ } finally {
+ readUnlock();
+ }
+ }
+
+ void removeXAttr(String src, XAttr xAttr) throws IOException {
+ nnConf.checkXAttrsConfigFlag();
+ HdfsFileStatus resultingStat = null;
+ FSPermissionChecker pc = getPermissionChecker();
+ try {
+ XAttrPermissionFilter.checkPermissionForApi(pc, xAttr);
+ } catch (AccessControlException e) {
+ logAuditEvent(false, "removeXAttr", src);
+ throw e;
+ }
+ checkOperation(OperationCategory.WRITE);
+ byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
+ writeLock();
+ try {
+ checkOperation(OperationCategory.WRITE);
+ checkNameNodeSafeMode("Cannot remove XAttr entry on " + src);
+ src = FSDirectory.resolvePath(src, pathComponents, dir);
+ if (isPermissionEnabled) {
+ checkOwner(pc, src);
+ checkPathAccess(pc, src, FsAction.WRITE);
+ }
+
+ dir.removeXAttr(src, xAttr);
+ resultingStat = getAuditFileInfo(src, false);
+ } catch (AccessControlException e) {
+ logAuditEvent(false, "removeXAttr", src);
+ throw e;
+ } finally {
+ writeUnlock();
+ }
+ getEditLog().logSync();
+ logAuditEvent(true, "removeXAttr", src, null, resultingStat);
+ }
/**
* Default AuditLogger implementation; used when no access logger is
@@ -7898,6 +8063,5 @@ public class FSNamesystem implements Nam
logger.addAppender(asyncAppender);
}
}
-
}