You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by um...@apache.org on 2014/05/22 15:54:58 UTC
svn commit: r1596873 [1/3] - in
/hadoop/common/branches/fs-encryption/hadoop-hdfs-project:
hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/
hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/ hadoop-hdfs/
hadoop-hdfs/src/contr...
Author: umamahesh
Date: Thu May 22 13:54:53 2014
New Revision: 1596873
URL: http://svn.apache.org/r1596873
Log:
Merge from trunk to fs-encryption branch
Added:
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java
- copied unchanged from r1596815, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
- copied unchanged from r1596815, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNConf.java
- copied unchanged from r1596815, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNConf.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFeature.java
- copied unchanged from r1596815, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFeature.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
- copied unchanged from r1596815, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java
- copied unchanged from r1596815, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OldSnapshotNameParam.java
- copied unchanged from r1596815, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OldSnapshotNameParam.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/SnapshotNameParam.java
- copied unchanged from r1596815, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/SnapshotNameParam.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrEncodingParam.java
- copied unchanged from r1596815, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrEncodingParam.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java
- copied unchanged from r1596815, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrSetFlagParam.java
- copied unchanged from r1596815, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrSetFlagParam.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrValueParam.java
- copied unchanged from r1596815, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrValueParam.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto
- copied unchanged from r1596815, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm
- copied unchanged from r1596815, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestXAttrCLI.java
- copied unchanged from r1596815, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestXAttrCLI.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java
- copied unchanged from r1596815, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
- copied unchanged from r1596815, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
- copied unchanged from r1596815, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithXAttr.java
- copied unchanged from r1596815, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithXAttr.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileContextXAttr.java
- copied unchanged from r1596815, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileContextXAttr.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeXAttr.java
- copied unchanged from r1596815, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeXAttr.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrConfigFlag.java
- copied unchanged from r1596815, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrConfigFlag.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestXAttrsWithHA.java
- copied unchanged from r1596815, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestXAttrsWithHA.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java
- copied unchanged from r1596815, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSXAttr.java
- copied unchanged from r1596815, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSXAttr.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml
- copied unchanged from r1596815, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml
Removed:
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclConfigFlag.java
Modified:
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/ (props changed)
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/Mountd.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/pom.xml
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/ (props changed)
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/native/ (props changed)
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_trash.c
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/vecsum.c
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/ (props changed)
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/ (props changed)
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/ (props changed)
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/ (props changed)
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
Propchange: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs:r1588992-1596568
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1595302-1596815
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/Mountd.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/Mountd.java?rev=1596873&r1=1596872&r2=1596873&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/Mountd.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/Mountd.java Thu May 22 13:54:53 2014
@@ -32,14 +32,14 @@ import org.apache.hadoop.mount.MountdBas
*/
public class Mountd extends MountdBase {
- public Mountd(Configuration config, DatagramSocket registrationSocket)
- throws IOException {
- super(new RpcProgramMountd(config, registrationSocket));
+ public Mountd(Configuration config, DatagramSocket registrationSocket,
+ boolean allowInsecurePorts) throws IOException {
+ super(new RpcProgramMountd(config, registrationSocket, allowInsecurePorts));
}
public static void main(String[] args) throws IOException {
Configuration config = new Configuration();
- Mountd mountd = new Mountd(config, null);
+ Mountd mountd = new Mountd(config, null, true);
mountd.start(true);
}
}
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java?rev=1596873&r1=1596872&r2=1596873&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java Thu May 22 13:54:53 2014
@@ -79,11 +79,11 @@ public class RpcProgramMountd extends Rp
private final NfsExports hostsMatcher;
- public RpcProgramMountd(Configuration config,
- DatagramSocket registrationSocket) throws IOException {
+ public RpcProgramMountd(Configuration config, DatagramSocket registrationSocket,
+ boolean allowInsecurePorts) throws IOException {
// Note that RPC cache is not enabled
super("mountd", "localhost", config.getInt("nfs3.mountd.port", PORT),
- PROGRAM, VERSION_1, VERSION_3, registrationSocket);
+ PROGRAM, VERSION_1, VERSION_3, registrationSocket, allowInsecurePorts);
exports = new ArrayList<String>();
exports.add(config.get(Nfs3Constant.EXPORT_POINT,
Nfs3Constant.EXPORT_POINT_DEFAULT));
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java?rev=1596873&r1=1596872&r2=1596873&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java Thu May 22 13:54:53 2014
@@ -21,6 +21,7 @@ import java.io.IOException;
import java.net.DatagramSocket;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.nfs.mount.Mountd;
import org.apache.hadoop.nfs.nfs3.Nfs3Base;
import org.apache.hadoop.util.StringUtils;
@@ -41,12 +42,13 @@ public class Nfs3 extends Nfs3Base {
}
public Nfs3(Configuration conf) throws IOException {
- this(conf, null);
+ this(conf, null, true);
}
- public Nfs3(Configuration conf, DatagramSocket registrationSocket) throws IOException {
- super(new RpcProgramNfs3(conf, registrationSocket), conf);
- mountd = new Mountd(conf, registrationSocket);
+ public Nfs3(Configuration conf, DatagramSocket registrationSocket,
+ boolean allowInsecurePorts) throws IOException {
+ super(new RpcProgramNfs3(conf, registrationSocket, allowInsecurePorts), conf);
+ mountd = new Mountd(conf, registrationSocket, allowInsecurePorts);
}
public Mountd getMountd() {
@@ -61,8 +63,13 @@ public class Nfs3 extends Nfs3Base {
static void startService(String[] args,
DatagramSocket registrationSocket) throws IOException {
- StringUtils.startupShutdownMessage(Nfs3.class, args, LOG);
- final Nfs3 nfsServer = new Nfs3(new Configuration(), registrationSocket);
+ StringUtils.startupShutdownMessage(Nfs3.class, args, LOG);
+ Configuration conf = new Configuration();
+ boolean allowInsecurePorts = conf.getBoolean(
+ DFSConfigKeys.DFS_NFS_ALLOW_INSECURE_PORTS_KEY,
+ DFSConfigKeys.DFS_NFS_ALLOW_INSECURE_PORTS_DEFAULT);
+ final Nfs3 nfsServer = new Nfs3(new Configuration(), registrationSocket,
+ allowInsecurePorts);
nfsServer.startServiceInternal(true);
}
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java?rev=1596873&r1=1596872&r2=1596873&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java Thu May 22 13:54:53 2014
@@ -166,11 +166,12 @@ public class RpcProgramNfs3 extends RpcP
private final RpcCallCache rpcCallCache;
- public RpcProgramNfs3(Configuration config, DatagramSocket registrationSocket)
- throws IOException {
+ public RpcProgramNfs3(Configuration config, DatagramSocket registrationSocket,
+ boolean allowInsecurePorts) throws IOException {
super("NFS3", "localhost", config.getInt(Nfs3Constant.NFS3_SERVER_PORT,
Nfs3Constant.NFS3_SERVER_PORT_DEFAULT), Nfs3Constant.PROGRAM,
- Nfs3Constant.VERSION, Nfs3Constant.VERSION, registrationSocket);
+ Nfs3Constant.VERSION, Nfs3Constant.VERSION, registrationSocket,
+ allowInsecurePorts);
config.set(FsPermission.UMASK_LABEL, "000");
iug = new IdUserGroup();
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1596873&r1=1596872&r2=1596873&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu May 22 13:54:53 2014
@@ -254,6 +254,69 @@ Trunk (Unreleased)
HDFS-5794. Fix the inconsistency of layout version number of
ADD_DATANODE_AND_STORAGE_UUIDS between trunk and branch-2. (jing9)
+ BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS
+
+ HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)
+
+ HDFS-6302. Implement XAttr as a INode feature. (Yi Liu via umamahesh)
+
+ HDFS-6309. Javadocs for Xattrs apis in DFSClient and other minor fixups. (Charles Lamb via umamahesh)
+
+ HDFS-6258. Namenode server-side storage for XAttrs. (Yi Liu via umamahesh)
+
+ HDFS-6303. HDFS implementation of FileContext API for XAttrs. (Yi Liu and Charles Lamb via umamahesh)
+
+ HDFS-6324. Shift XAttr helper code out for reuse. (Yi Liu via umamahesh)
+
+ HDFS-6301. NameNode: persist XAttrs in fsimage and record XAttrs modifications to edit log.
+ (Yi Liu via umamahesh)
+
+ HDFS-6298. XML based End-to-End test for getfattr and setfattr commands. (Yi Liu via umamahesh)
+
+ HDFS-6314. Test cases for XAttrs. (Yi Liu via umamahesh)
+
+ HDFS-6344. Maximum limit on the size of an xattr. (Yi Liu via umamahesh)
+
+ HDFS-6377. Unify xattr name and value limits into a single limit. (wang)
+
+ HDFS-6373. Remove support for extended attributes on symlinks. (Charles Lamb via wang)
+
+ HDFS-6283. Write end user documentation for xattrs. (wang)
+
+ HDFS-6412. Interface audience and stability annotations missing from
+ several new classes related to xattrs. (wang)
+
+ HDFS-6259. Support extended attributes via WebHDFS. (yliu)
+
+ HDFS-6346. Optimize OP_SET_XATTRS by persisting single Xattr entry per setXattr/removeXattr api call
+ (Yi Liu via umamahesh)
+
+ HDFS-6331. ClientProtocol#setXattr should not be annotated idempotent.
+ (umamahesh via wang)
+
+ HDFS-6335. TestOfflineEditsViewer for XAttr. (Yi Liu via umamahesh)
+
+ HDFS-6343. fix TestNamenodeRetryCache and TestRetryCacheWithHA failures. (umamahesh)
+
+ HDFS-6366. FsImage loading failed with RemoveXattr op (umamahesh)
+
+ HDFS-6357. SetXattr should persist rpcIDs for handling retrycache with Namenode restart and HA
+ (umamahesh)
+
+ HDFS-6372. Handle setXattr rpcIDs for OfflineEditsViewer. (umamahesh)
+
+ HDFS-6410. DFSClient unwraps AclException in xattr methods, but those
+ methods cannot throw AclException. (wang)
+
+ HDFS-6413. xattr names erroneously handled as case-insensitive.
+ (Charles Lamb via cnauroth)
+
+ HDFS-6414. xattr modification operations are based on state of latest
+ snapshot instead of current version of inode. (Andrew Wang via cnauroth)
+
+ HDFS-6374. setXAttr should require the user to be the owner of the file
+ or directory (Charles Lamb via wang)
+
Release 2.5.0 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -273,6 +336,9 @@ Release 2.5.0 - UNRELEASED
HDFS-6334. Client failover proxy provider for IP failover based NN HA.
(kihwal)
+ HDFS-6406. Add capability for NFS gateway to reject connections from
+ unprivileged ports. (atm)
+
IMPROVEMENTS
HDFS-6007. Update documentation about short-circuit local reads (iwasakims
@@ -369,6 +435,11 @@ Release 2.5.0 - UNRELEASED
HDFS-6345. DFS.listCacheDirectives() should allow filtering based on
cache directive ID. (wang)
+ HDFS-6432. Add snapshot related APIs to webhdfs. (jing9)
+
+ HDFS-6396. Remove support for ACL feature from INodeSymlink.
+ (Charles Lamb via wang)
+
OPTIMIZATIONS
HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)
@@ -492,6 +563,26 @@ Release 2.5.0 - UNRELEASED
HDFS-6250. Fix test failed in TestBalancerWithNodeGroup.testBalancerWithRackLocality
(Binglin Chang and Chen He via junping_du)
+ HDFS-4913. Deleting file through fuse-dfs when using trash fails requiring
+ root permissions (cmccabe)
+
+ HDFS-6421. Fix vecsum.c compile on BSD and some other systems. (Mit Desai
+ via Colin Patrick McCabe)
+
+ HDFS-6419. TestBookKeeperHACheckpoints#TestSBNCheckpoints fails on trunk.
+ (Akira AJISAKA via kihwal)
+
+ HDFS-6409. Fix typo in log message about NameNode layout version upgrade.
+ (Chen He via cnauroth)
+
+ HDFS-6433. Replace BytesMoved class with AtomicLong.
+ (Benoy Antony via cnauroth)
+
+ HDFS-6438. DeleteSnapshot should be a DELETE request in WebHdfs. (jing9)
+
+ HDFS-6423. Diskspace quota usage should be updated when appending data to
+ partial block. (jing9)
+
Release 2.4.1 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -569,6 +660,12 @@ Release 2.4.1 - UNRELEASED
HDFS-6402. Suppress findbugs warning for failure to override equals and
hashCode in FsAclPermission. (cnauroth)
+ HDFS-6325. Append should fail if the last block has insufficient number of
+ replicas (Keith Pak via cos)
+
+ HDFS-6397. NN shows inconsistent value in deadnode count.
+ (Mohammad Kamrul Islam via kihwal)
+
Release 2.4.0 - 2014-04-07
INCOMPATIBLE CHANGES
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/pom.xml?rev=1596873&r1=1596872&r2=1596873&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/pom.xml (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/pom.xml Thu May 22 13:54:53 2014
@@ -290,6 +290,7 @@ http://maven.apache.org/xsd/maven-4.0.0.
<include>NamenodeProtocol.proto</include>
<include>QJournalProtocol.proto</include>
<include>acl.proto</include>
+ <include>xattr.proto</include>
<include>datatransfer.proto</include>
<include>fsimage.proto</include>
<include>hdfs.proto</include>
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java?rev=1596873&r1=1596872&r2=1596873&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java Thu May 22 13:54:53 2014
@@ -23,13 +23,10 @@ import org.apache.hadoop.hdfs.MiniDFSClu
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.hdfs.server.namenode.ha.TestStandbyCheckpoints;
-import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
-import com.google.common.collect.ImmutableList;
-
/**
* Runs the same tests as TestStandbyCheckpoints, but
* using a bookkeeper journal manager as the shared directory
@@ -43,19 +40,11 @@ public class TestBookKeeperHACheckpoints
@Override
@Before
public void setupCluster() throws Exception {
- Configuration conf = new Configuration();
- conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
- conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 5);
- conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+ Configuration conf = setupCommonConfig();
conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
BKJMUtil.createJournalURI("/checkpointing" + journalCount++)
.toString());
BKJMUtil.addJournalManagerDefinition(conf);
- conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
- conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY, SlowCodec.class
- .getCanonicalName());
- CompressionCodecFactory.setCodecClasses(conf, ImmutableList
- .<Class> of(SlowCodec.class));
MiniDFSNNTopology topology = new MiniDFSNNTopology()
.addNameservice(new MiniDFSNNTopology.NSConf("ns1")
.addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
Propchange: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1595302-1596815
Merged /hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1588992-1596568
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java?rev=1596873&r1=1596872&r2=1596873&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java Thu May 22 13:54:53 2014
@@ -25,6 +25,7 @@ import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
+import java.util.Map;
import java.util.NoSuchElementException;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -414,6 +415,33 @@ public class Hdfs extends AbstractFileSy
public AclStatus getAclStatus(Path path) throws IOException {
return dfs.getAclStatus(getUriPath(path));
}
+
+ @Override
+ public void setXAttr(Path path, String name, byte[] value,
+ EnumSet<XAttrSetFlag> flag) throws IOException {
+ dfs.setXAttr(getUriPath(path), name, value, flag);
+ }
+
+ @Override
+ public byte[] getXAttr(Path path, String name) throws IOException {
+ return dfs.getXAttr(getUriPath(path), name);
+ }
+
+ @Override
+ public Map<String, byte[]> getXAttrs(Path path) throws IOException {
+ return dfs.getXAttrs(getUriPath(path));
+ }
+
+ @Override
+ public Map<String, byte[]> getXAttrs(Path path, List<String> names)
+ throws IOException {
+ return dfs.getXAttrs(getUriPath(path), names);
+ }
+
+ @Override
+ public void removeXAttr(Path path, String name) throws IOException {
+ dfs.removeXAttr(getUriPath(path), name);
+ }
/**
* Renew an existing delegation token.
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1596873&r1=1596872&r2=1596873&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Thu May 22 13:54:53 2014
@@ -109,6 +109,8 @@ import org.apache.hadoop.fs.MD5MD5CRC32C
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
@@ -2757,6 +2759,72 @@ public class DFSClient implements java.i
UnresolvedPathException.class);
}
}
+
+ public void setXAttr(String src, String name, byte[] value,
+ EnumSet<XAttrSetFlag> flag) throws IOException {
+ checkOpen();
+ try {
+ namenode.setXAttr(src, XAttrHelper.buildXAttr(name, value), flag);
+ } catch (RemoteException re) {
+ throw re.unwrapRemoteException(AccessControlException.class,
+ FileNotFoundException.class,
+ NSQuotaExceededException.class,
+ SafeModeException.class,
+ SnapshotAccessControlException.class,
+ UnresolvedPathException.class);
+ }
+ }
+
+ public byte[] getXAttr(String src, String name) throws IOException {
+ checkOpen();
+ try {
+ final List<XAttr> xAttrs = XAttrHelper.buildXAttrAsList(name);
+ final List<XAttr> result = namenode.getXAttrs(src, xAttrs);
+ return XAttrHelper.getFirstXAttrValue(result);
+ } catch(RemoteException re) {
+ throw re.unwrapRemoteException(AccessControlException.class,
+ FileNotFoundException.class,
+ UnresolvedPathException.class);
+ }
+ }
+
+ public Map<String, byte[]> getXAttrs(String src) throws IOException {
+ checkOpen();
+ try {
+ return XAttrHelper.buildXAttrMap(namenode.getXAttrs(src, null));
+ } catch(RemoteException re) {
+ throw re.unwrapRemoteException(AccessControlException.class,
+ FileNotFoundException.class,
+ UnresolvedPathException.class);
+ }
+ }
+
+ public Map<String, byte[]> getXAttrs(String src, List<String> names)
+ throws IOException {
+ checkOpen();
+ try {
+ return XAttrHelper.buildXAttrMap(namenode.getXAttrs(
+ src, XAttrHelper.buildXAttrs(names)));
+ } catch(RemoteException re) {
+ throw re.unwrapRemoteException(AccessControlException.class,
+ FileNotFoundException.class,
+ UnresolvedPathException.class);
+ }
+ }
+
+ public void removeXAttr(String src, String name) throws IOException {
+ checkOpen();
+ try {
+ namenode.removeXAttr(src, XAttrHelper.buildXAttr(name));
+ } catch(RemoteException re) {
+ throw re.unwrapRemoteException(AccessControlException.class,
+ FileNotFoundException.class,
+ NSQuotaExceededException.class,
+ SafeModeException.class,
+ SnapshotAccessControlException.class,
+ UnresolvedPathException.class);
+ }
+ }
@Override // RemotePeerFactory
public Peer newConnectedPeer(InetSocketAddress addr) throws IOException {
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1596873&r1=1596872&r2=1596873&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Thu May 22 13:54:53 2014
@@ -192,6 +192,8 @@ public class DFSConfigKeys extends Commo
public static final String DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT = "supergroup";
public static final String DFS_NAMENODE_ACLS_ENABLED_KEY = "dfs.namenode.acls.enabled";
public static final boolean DFS_NAMENODE_ACLS_ENABLED_DEFAULT = false;
+ public static final String DFS_NAMENODE_XATTRS_ENABLED_KEY = "dfs.namenode.xattrs.enabled";
+ public static final boolean DFS_NAMENODE_XATTRS_ENABLED_DEFAULT = true;
public static final String DFS_ADMIN = "dfs.cluster.administrators";
public static final String DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY = "dfs.https.server.keystore.resource";
public static final String DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT = "ssl-server.xml";
@@ -295,6 +297,11 @@ public class DFSConfigKeys extends Commo
public static final long DFS_NAMENODE_MIN_BLOCK_SIZE_DEFAULT = 1024*1024;
public static final String DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY = "dfs.namenode.fs-limits.max-blocks-per-file";
public static final long DFS_NAMENODE_MAX_BLOCKS_PER_FILE_DEFAULT = 1024*1024;
+ public static final String DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY = "dfs.namenode.fs-limits.max-xattrs-per-inode";
+ public static final int DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT = 32;
+ public static final String DFS_NAMENODE_MAX_XATTR_SIZE_KEY = "dfs.namenode.fs-limits.max-xattr-size";
+ public static final int DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT = 16384;
+
//Following keys have no defaults
public static final String DFS_DATANODE_DATA_DIR_KEY = "dfs.datanode.data.dir";
@@ -631,9 +638,12 @@ public class DFSConfigKeys extends Commo
public static final String DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE =
"dfs.client.hedged.read.threadpool.size";
- public static final int DEFAULT_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE = 0;
- public static final String DFS_NFS_KEYTAB_FILE_KEY = "dfs.nfs.keytab.file";
- public static final String DFS_NFS_KERBEROS_PRINCIPAL_KEY = "dfs.nfs.kerberos.principal";
- public static final String DFS_NFS_REGISTRATION_PORT_KEY = "dfs.nfs.registration.port";
- public static final int DFS_NFS_REGISTRATION_PORT_DEFAULT = 40; // Currently unassigned.
+ public static final int DEFAULT_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE = 0;
+ public static final String DFS_NFS_KEYTAB_FILE_KEY = "dfs.nfs.keytab.file";
+ public static final String DFS_NFS_KERBEROS_PRINCIPAL_KEY = "dfs.nfs.kerberos.principal";
+ public static final String DFS_NFS_REGISTRATION_PORT_KEY = "dfs.nfs.registration.port";
+ public static final int DFS_NFS_REGISTRATION_PORT_DEFAULT = 40; // Currently unassigned.
+ public static final String DFS_NFS_ALLOW_INSECURE_PORTS_KEY = "dfs.nfs.allow.insecure.ports";
+ public static final boolean DFS_NFS_ALLOW_INSECURE_PORTS_DEFAULT = true;
+
}
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=1596873&r1=1596872&r2=1596873&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Thu May 22 13:54:53 2014
@@ -25,6 +25,7 @@ import java.net.URI;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
+import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -46,6 +47,7 @@ import org.apache.hadoop.fs.FsServerDefa
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
@@ -1769,4 +1771,91 @@ public class DistributedFileSystem exten
}
}.resolve(this, absF);
}
+
+ @Override
+ public void setXAttr(Path path, final String name, final byte[] value,
+ final EnumSet<XAttrSetFlag> flag) throws IOException {
+ Path absF = fixRelativePart(path);
+ new FileSystemLinkResolver<Void>() {
+
+ @Override
+ public Void doCall(final Path p) throws IOException {
+ dfs.setXAttr(getPathName(p), name, value, flag);
+ return null;
+ }
+
+ @Override
+ public Void next(final FileSystem fs, final Path p) throws IOException {
+ fs.setXAttr(p, name, value, flag);
+ return null;
+ }
+ }.resolve(this, absF);
+ }
+
+ @Override
+ public byte[] getXAttr(Path path, final String name) throws IOException {
+ final Path absF = fixRelativePart(path);
+ return new FileSystemLinkResolver<byte[]>() {
+ @Override
+ public byte[] doCall(final Path p) throws IOException {
+ return dfs.getXAttr(getPathName(p), name);
+ }
+ @Override
+ public byte[] next(final FileSystem fs, final Path p)
+ throws IOException, UnresolvedLinkException {
+ return fs.getXAttr(p, name);
+ }
+ }.resolve(this, absF);
+ }
+
+ @Override
+ public Map<String, byte[]> getXAttrs(Path path) throws IOException {
+ final Path absF = fixRelativePart(path);
+ return new FileSystemLinkResolver<Map<String, byte[]>>() {
+ @Override
+ public Map<String, byte[]> doCall(final Path p) throws IOException {
+ return dfs.getXAttrs(getPathName(p));
+ }
+ @Override
+ public Map<String, byte[]> next(final FileSystem fs, final Path p)
+ throws IOException, UnresolvedLinkException {
+ return fs.getXAttrs(p);
+ }
+ }.resolve(this, absF);
+ }
+
+ @Override
+ public Map<String, byte[]> getXAttrs(Path path, final List<String> names)
+ throws IOException {
+ final Path absF = fixRelativePart(path);
+ return new FileSystemLinkResolver<Map<String, byte[]>>() {
+ @Override
+ public Map<String, byte[]> doCall(final Path p) throws IOException {
+ return dfs.getXAttrs(getPathName(p), names);
+ }
+ @Override
+ public Map<String, byte[]> next(final FileSystem fs, final Path p)
+ throws IOException, UnresolvedLinkException {
+ return fs.getXAttrs(p, names);
+ }
+ }.resolve(this, absF);
+ }
+
+ @Override
+ public void removeXAttr(Path path, final String name) throws IOException {
+ Path absF = fixRelativePart(path);
+ new FileSystemLinkResolver<Void>() {
+ @Override
+ public Void doCall(final Path p) throws IOException {
+ dfs.removeXAttr(getPathName(p), name);
+ return null;
+ }
+
+ @Override
+ public Void next(final FileSystem fs, final Path p) throws IOException {
+ fs.removeXAttr(p, name);
+ return null;
+ }
+ }.resolve(this, absF);
+ }
}
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=1596873&r1=1596872&r2=1596873&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Thu May 22 13:54:53 2014
@@ -31,10 +31,12 @@ import org.apache.hadoop.fs.FileAlreadyE
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.InvalidPathException;
import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
@@ -1254,4 +1256,66 @@ public interface ClientProtocol {
*/
@Idempotent
public AclStatus getAclStatus(String src) throws IOException;
+
+ /**
+ * Set xattr of a file or directory.
+ * A regular user only can set xattr of "user" namespace.
+ * A super user can set xattr of "user" and "trusted" namespace.
+ * XAttr of "security" and "system" namespace is only used/exposed
+ * internally to the FS impl.
+ * <p/>
+ * For xattr of "user" namespace, its access permissions are
+ * defined by the file or directory permission bits.
+ * XAttr will be set only when login user has correct permissions.
+ * <p/>
+ * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
+ * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+ * @param src file or directory
+ * @param xAttr <code>XAttr</code> to set
+ * @param flag set flag
+ * @throws IOException
+ */
+ @AtMostOnce
+ public void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
+ throws IOException;
+
+ /**
+ * Get xattrs of a file or directory. Values in xAttrs parameter are ignored.
+ * If xattrs is null or empty, equals getting all xattrs of the file or
+ * directory.
+ * Only xattrs which login user has correct permissions will be returned.
+ * <p/>
+ * A regular user only can get xattr of "user" namespace.
+ * A super user can get xattr of "user" and "trusted" namespace.
+ * XAttr of "security" and "system" namespace is only used/exposed
+ * internally to the FS impl.
+ * <p/>
+ * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
+ * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+ * @param src file or directory
+ * @param xAttrs xAttrs to get
+ * @return List<XAttr> <code>XAttr</code> list
+ * @throws IOException
+ */
+ @Idempotent
+ public List<XAttr> getXAttrs(String src, List<XAttr> xAttrs)
+ throws IOException;
+
+ /**
+ * Remove xattr of a file or directory. Value in xAttr parameter is ignored.
+ * Name must be prefixed with user/trusted/security/system.
+ * <p/>
+ * A regular user only can remove xattr of "user" namespace.
+ * A super user can remove xattr of "user" and "trusted" namespace.
+ * XAttr of "security" and "system" namespace is only used/exposed
+ * internally to the FS impl.
+ * <p/>
+ * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
+ * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+ * @param src file or directory
+ * @param xAttr <code>XAttr</code> to remove
+ * @throws IOException
+ */
+ @Idempotent
+ public void removeXAttr(String src, XAttr xAttr) throws IOException;
}
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java?rev=1596873&r1=1596872&r2=1596873&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java Thu May 22 13:54:53 2014
@@ -174,6 +174,12 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrResponseProto;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.INodeId;
@@ -302,6 +308,12 @@ public class ClientNamenodeProtocolServe
private static final RemoveAclResponseProto
VOID_REMOVEACL_RESPONSE = RemoveAclResponseProto.getDefaultInstance();
+
+ private static final SetXAttrResponseProto
+ VOID_SETXATTR_RESPONSE = SetXAttrResponseProto.getDefaultInstance();
+
+ private static final RemoveXAttrResponseProto
+ VOID_REMOVEXATTR_RESPONSE = RemoveXAttrResponseProto.getDefaultInstance();
/**
* Constructor
@@ -1262,4 +1274,38 @@ public class ClientNamenodeProtocolServe
throw new ServiceException(e);
}
}
+
+ @Override
+ public SetXAttrResponseProto setXAttr(RpcController controller,
+ SetXAttrRequestProto req) throws ServiceException {
+ try {
+ server.setXAttr(req.getSrc(), PBHelper.convertXAttr(req.getXAttr()),
+ PBHelper.convert(req.getFlag()));
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ return VOID_SETXATTR_RESPONSE;
+ }
+
+ @Override
+ public GetXAttrsResponseProto getXAttrs(RpcController controller,
+ GetXAttrsRequestProto req) throws ServiceException {
+ try {
+ return PBHelper.convertXAttrsResponse(server.getXAttrs(req.getSrc(),
+ PBHelper.convertXAttrs(req.getXAttrsList())));
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public RemoveXAttrResponseProto removeXAttr(RpcController controller,
+ RemoveXAttrRequestProto req) throws ServiceException {
+ try {
+ server.removeXAttr(req.getSrc(), PBHelper.convertXAttr(req.getXAttr()));
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ return VOID_REMOVEXATTR_RESPONSE;
+ }
}
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java?rev=1596873&r1=1596872&r2=1596873&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java Thu May 22 13:54:53 2014
@@ -35,6 +35,8 @@ import org.apache.hadoop.fs.FsServerDefa
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
@@ -141,6 +143,9 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrRequestProto;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
@@ -1268,4 +1273,47 @@ public class ClientNamenodeProtocolTrans
throw ProtobufHelper.getRemoteException(e);
}
}
+
+ @Override
+ public void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
+ throws IOException {
+ SetXAttrRequestProto req = SetXAttrRequestProto.newBuilder()
+ .setSrc(src)
+ .setXAttr(PBHelper.convertXAttrProto(xAttr))
+ .setFlag(PBHelper.convert(flag))
+ .build();
+ try {
+ rpcProxy.setXAttr(null, req);
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ }
+
+ @Override
+ public List<XAttr> getXAttrs(String src, List<XAttr> xAttrs)
+ throws IOException {
+ GetXAttrsRequestProto.Builder builder = GetXAttrsRequestProto.newBuilder();
+ builder.setSrc(src);
+ if (xAttrs != null) {
+ builder.addAllXAttrs(PBHelper.convertXAttrProto(xAttrs));
+ }
+ GetXAttrsRequestProto req = builder.build();
+ try {
+ return PBHelper.convert(rpcProxy.getXAttrs(null, req));
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ }
+
+ @Override
+ public void removeXAttr(String src, XAttr xAttr) throws IOException {
+ RemoveXAttrRequestProto req = RemoveXAttrRequestProto
+ .newBuilder().setSrc(src)
+ .setXAttr(PBHelper.convertXAttrProto(xAttr)).build();
+ try {
+ rpcProxy.removeXAttr(null, req);
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ }
}
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java?rev=1596873&r1=1596872&r2=1596873&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java Thu May 22 13:54:53 2014
@@ -32,6 +32,8 @@ import org.apache.hadoop.fs.ContentSumma
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
@@ -150,6 +152,10 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto.XAttrNamespaceProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrSetFlagProto;
import org.apache.hadoop.hdfs.security.token.block.BlockKey;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
@@ -221,6 +227,8 @@ public class PBHelper {
AclEntryType.values();
private static final FsAction[] FSACTION_VALUES =
FsAction.values();
+ private static final XAttr.NameSpace[] XATTR_NAMESPACE_VALUES =
+ XAttr.NameSpace.values();
private PBHelper() {
/** Hidden constructor */
@@ -2007,6 +2015,14 @@ public class PBHelper {
private static AclEntryType convert(AclEntryTypeProto v) {
return castEnum(v, ACL_ENTRY_TYPE_VALUES);
}
+
+ private static XAttrNamespaceProto convert(XAttr.NameSpace v) {
+ return XAttrNamespaceProto.valueOf(v.ordinal());
+ }
+
+ private static XAttr.NameSpace convert(XAttrNamespaceProto v) {
+ return castEnum(v, XATTR_NAMESPACE_VALUES);
+ }
private static FsActionProto convert(FsAction v) {
return FsActionProto.valueOf(v != null ? v.ordinal() : 0);
@@ -2060,6 +2076,108 @@ public class PBHelper {
.addAllEntries(convertAclEntryProto(e.getEntries())).build();
return GetAclStatusResponseProto.newBuilder().setResult(r).build();
}
+
+ public static XAttrProto convertXAttrProto(XAttr a) {
+ XAttrProto.Builder builder = XAttrProto.newBuilder();
+ builder.setNamespace(convert(a.getNameSpace()));
+ if (a.getName() != null) {
+ builder.setName(a.getName());
+ }
+ if (a.getValue() != null) {
+ builder.setValue(getByteString(a.getValue()));
+ }
+ return builder.build();
+ }
+
+ public static List<XAttrProto> convertXAttrProto(
+ List<XAttr> xAttrSpec) {
+ ArrayList<XAttrProto> xAttrs = Lists.newArrayListWithCapacity(
+ xAttrSpec.size());
+ for (XAttr a : xAttrSpec) {
+ XAttrProto.Builder builder = XAttrProto.newBuilder();
+ builder.setNamespace(convert(a.getNameSpace()));
+ if (a.getName() != null) {
+ builder.setName(a.getName());
+ }
+ if (a.getValue() != null) {
+ builder.setValue(getByteString(a.getValue()));
+ }
+ xAttrs.add(builder.build());
+ }
+ return xAttrs;
+ }
+
+ /**
+ * The flag field in PB is a bitmask whose values are the same as the
+ * enum values of XAttrSetFlag
+ */
+ public static int convert(EnumSet<XAttrSetFlag> flag) {
+ int value = 0;
+ if (flag.contains(XAttrSetFlag.CREATE)) {
+ value |= XAttrSetFlagProto.XATTR_CREATE.getNumber();
+ }
+ if (flag.contains(XAttrSetFlag.REPLACE)) {
+ value |= XAttrSetFlagProto.XATTR_REPLACE.getNumber();
+ }
+ return value;
+ }
+
+ public static EnumSet<XAttrSetFlag> convert(int flag) {
+ EnumSet<XAttrSetFlag> result =
+ EnumSet.noneOf(XAttrSetFlag.class);
+ if ((flag & XAttrSetFlagProto.XATTR_CREATE_VALUE) ==
+ XAttrSetFlagProto.XATTR_CREATE_VALUE) {
+ result.add(XAttrSetFlag.CREATE);
+ }
+ if ((flag & XAttrSetFlagProto.XATTR_REPLACE_VALUE) ==
+ XAttrSetFlagProto.XATTR_REPLACE_VALUE) {
+ result.add(XAttrSetFlag.REPLACE);
+ }
+ return result;
+ }
+
+ public static XAttr convertXAttr(XAttrProto a) {
+ XAttr.Builder builder = new XAttr.Builder();
+ builder.setNameSpace(convert(a.getNamespace()));
+ if (a.hasName()) {
+ builder.setName(a.getName());
+ }
+ if (a.hasValue()) {
+ builder.setValue(a.getValue().toByteArray());
+ }
+ return builder.build();
+ }
+
+ public static List<XAttr> convertXAttrs(List<XAttrProto> xAttrSpec) {
+ ArrayList<XAttr> xAttrs = Lists.newArrayListWithCapacity(xAttrSpec.size());
+ for (XAttrProto a : xAttrSpec) {
+ XAttr.Builder builder = new XAttr.Builder();
+ builder.setNameSpace(convert(a.getNamespace()));
+ if (a.hasName()) {
+ builder.setName(a.getName());
+ }
+ if (a.hasValue()) {
+ builder.setValue(a.getValue().toByteArray());
+ }
+ xAttrs.add(builder.build());
+ }
+ return xAttrs;
+ }
+
+ public static List<XAttr> convert(GetXAttrsResponseProto a) {
+ List<XAttrProto> xAttrs = a.getXAttrsList();
+ return convertXAttrs(xAttrs);
+ }
+
+ public static GetXAttrsResponseProto convertXAttrsResponse(
+ List<XAttr> xAttrs) {
+ GetXAttrsResponseProto.Builder builder = GetXAttrsResponseProto
+ .newBuilder();
+ if (xAttrs != null) {
+ builder.addAllXAttrs(convertXAttrProto(xAttrs));
+ }
+ return builder.build();
+ }
public static ShortCircuitShmSlotProto convert(SlotId slotId) {
return ShortCircuitShmSlotProto.newBuilder().
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java?rev=1596873&r1=1596872&r2=1596873&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java Thu May 22 13:54:53 2014
@@ -47,6 +47,7 @@ import java.util.concurrent.ExecutionExc
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -365,7 +366,7 @@ public class Balancer {
sendRequest(out);
receiveResponse(in);
- bytesMoved.inc(block.getNumBytes());
+ bytesMoved.addAndGet(block.getNumBytes());
LOG.info("Successfully moved " + this);
} catch (IOException e) {
LOG.warn("Failed to move " + this + ": " + e.getMessage());
@@ -1111,17 +1112,7 @@ public class Balancer {
return null;
}
- private static class BytesMoved {
- private long bytesMoved = 0L;;
- private synchronized void inc( long bytes ) {
- bytesMoved += bytes;
- }
-
- private synchronized long get() {
- return bytesMoved;
- }
- };
- private final BytesMoved bytesMoved = new BytesMoved();
+ private final AtomicLong bytesMoved = new AtomicLong();
/* Start a thread to dispatch block moves for each source.
* The thread selects blocks to move & sends request to proxy source to
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1596873&r1=1596872&r2=1596873&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Thu May 22 13:54:53 2014
@@ -946,6 +946,16 @@ public class BlockManager {
}
/**
+ * Check if a block is replicated to at least the minimum replication.
+ */
+ public boolean isSufficientlyReplicated(BlockInfo b) {
+ // Compare against the lesser of the minReplication and number of live DNs.
+ final int replication =
+ Math.min(minReplication, getDatanodeManager().getNumLiveDataNodes());
+ return countNodes(b).liveReplicas() >= replication;
+ }
+
+ /**
* return a list of blocks & their locations on <code>datanode</code> whose
* total size is <code>size</code>
*
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java?rev=1596873&r1=1596872&r2=1596873&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java Thu May 22 13:54:53 2014
@@ -1057,15 +1057,7 @@ public class DatanodeManager {
/** @return the number of dead datanodes. */
public int getNumDeadDataNodes() {
- int numDead = 0;
- synchronized (datanodeMap) {
- for(DatanodeDescriptor dn : datanodeMap.values()) {
- if (isDatanodeDead(dn) ) {
- numDead++;
- }
- }
- }
- return numDead;
+ return getDatanodeListForReport(DatanodeReportType.DEAD).size();
}
/** @return list of datanodes where decommissioning is in progress. */
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1596873&r1=1596872&r2=1596873&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Thu May 22 13:54:53 2014
@@ -24,6 +24,7 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.EnumSet;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
@@ -39,6 +40,8 @@ import org.apache.hadoop.fs.ParentNotDir
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathIsNotDirectoryException;
import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
@@ -47,6 +50,7 @@ import org.apache.hadoop.fs.permission.P
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.protocol.AclException;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -79,6 +83,7 @@ import org.apache.hadoop.hdfs.util.ReadO
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
/*************************************************
* FSDirectory stores the filesystem directory state.
@@ -125,6 +130,7 @@ public class FSDirectory implements Clos
private final int contentCountLimit; // max content summary counts per run
private final INodeMap inodeMap; // Synchronized by dirLock
private long yieldCount = 0; // keep track of lock yield count.
+ private final int inodeXAttrsLimit; //inode xattrs max limit
// lock to protect the directory and BlockMap
private final ReentrantReadWriteLock dirLock;
@@ -190,6 +196,12 @@ public class FSDirectory implements Clos
this.maxDirItems = conf.getInt(
DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY,
DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_DEFAULT);
+ this.inodeXAttrsLimit = conf.getInt(
+ DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY,
+ DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT);
+ Preconditions.checkArgument(this.inodeXAttrsLimit >= 0,
+ "Cannot set a negative limit on the number of xattrs per inode (%s).",
+ DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY);
// We need a maximum maximum because by default, PB limits message sizes
// to 64MB. This means we can only store approximately 6.7 million entries
// per directory, but let's use 6.4 million for some safety.
@@ -2856,6 +2868,116 @@ public class FSDirectory implements Clos
readUnlock();
}
}
+
+ void removeXAttr(String src, XAttr xAttr) throws IOException {
+ writeLock();
+ try {
+ XAttr removedXAttr = unprotectedRemoveXAttr(src, xAttr);
+ if (removedXAttr != null) {
+ fsImage.getEditLog().logRemoveXAttr(src, removedXAttr);
+ } else {
+ NameNode.stateChangeLog.info("DIR* FSDirectory.removeXAttr: XAttr " +
+ XAttrHelper.getPrefixName(xAttr) +
+ " does not exist on the path " + src);
+ }
+ } finally {
+ writeUnlock();
+ }
+ }
+
+ XAttr unprotectedRemoveXAttr(String src,
+ XAttr xAttr) throws IOException {
+ assert hasWriteLock();
+ INodesInPath iip = getINodesInPath4Write(normalizePath(src), true);
+ INode inode = resolveLastINode(src, iip);
+ int snapshotId = iip.getLatestSnapshotId();
+ List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);
+ List<XAttr> newXAttrs = filterINodeXAttr(existingXAttrs, xAttr);
+ if (existingXAttrs.size() != newXAttrs.size()) {
+ XAttrStorage.updateINodeXAttrs(inode, newXAttrs, snapshotId);
+ return xAttr;
+ }
+ return null;
+ }
+
+ List<XAttr> filterINodeXAttr(List<XAttr> existingXAttrs,
+ XAttr xAttr) throws QuotaExceededException {
+ if (existingXAttrs == null || existingXAttrs.isEmpty()) {
+ return existingXAttrs;
+ }
+
+ List<XAttr> xAttrs = Lists.newArrayListWithCapacity(existingXAttrs.size());
+ for (XAttr a : existingXAttrs) {
+ if (!(a.getNameSpace() == xAttr.getNameSpace()
+ && a.getName().equals(xAttr.getName()))) {
+ xAttrs.add(a);
+ }
+ }
+
+ return xAttrs;
+ }
+
+ void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag,
+ boolean logRetryCache) throws IOException {
+ writeLock();
+ try {
+ unprotectedSetXAttr(src, xAttr, flag);
+ fsImage.getEditLog().logSetXAttr(src, xAttr, logRetryCache);
+ } finally {
+ writeUnlock();
+ }
+ }
+
+ void unprotectedSetXAttr(String src, XAttr xAttr,
+ EnumSet<XAttrSetFlag> flag) throws IOException {
+ assert hasWriteLock();
+ INodesInPath iip = getINodesInPath4Write(normalizePath(src), true);
+ INode inode = resolveLastINode(src, iip);
+ int snapshotId = iip.getLatestSnapshotId();
+ List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);
+ List<XAttr> newXAttrs = setINodeXAttr(existingXAttrs, xAttr, flag);
+ XAttrStorage.updateINodeXAttrs(inode, newXAttrs, snapshotId);
+ }
+
+ List<XAttr> setINodeXAttr(List<XAttr> existingXAttrs, XAttr xAttr,
+ EnumSet<XAttrSetFlag> flag) throws QuotaExceededException, IOException {
+ List<XAttr> xAttrs = Lists.newArrayListWithCapacity(
+ existingXAttrs != null ? existingXAttrs.size() + 1 : 1);
+ boolean exist = false;
+ if (existingXAttrs != null) {
+ for (XAttr a: existingXAttrs) {
+ if ((a.getNameSpace() == xAttr.getNameSpace()
+ && a.getName().equals(xAttr.getName()))) {
+ exist = true;
+ } else {
+ xAttrs.add(a);
+ }
+ }
+ }
+
+ XAttrSetFlag.validate(xAttr.getName(), exist, flag);
+ xAttrs.add(xAttr);
+
+ if (xAttrs.size() > inodeXAttrsLimit) {
+ throw new IOException("Cannot add additional XAttr to inode, "
+ + "would exceed limit of " + inodeXAttrsLimit);
+ }
+
+ return xAttrs;
+ }
+
+ List<XAttr> getXAttrs(String src) throws IOException {
+ String srcs = normalizePath(src);
+ readLock();
+ try {
+ INodesInPath iip = getLastINodeInPath(srcs, true);
+ INode inode = resolveLastINode(src, iip);
+ int snapshotId = iip.getPathSnapshotId();
+ return XAttrStorage.readINodeXAttrs(inode, snapshotId);
+ } finally {
+ readUnlock();
+ }
+ }
private static INode resolveLastINode(String src, INodesInPath iip)
throws FileNotFoundException {
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1596873&r1=1596872&r2=1596873&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Thu May 22 13:54:53 2014
@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
@@ -69,6 +70,7 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ReassignLeaseOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCacheDirectiveInfoOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCachePoolOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveXAttrOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp;
@@ -80,6 +82,7 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetPermissionsOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetQuotaOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetReplicationOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetXAttrOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SymlinkOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp;
@@ -1050,6 +1053,21 @@ public class FSEditLog implements LogsPu
op.aclEntries = entries;
logEdit(op);
}
+
+ void logSetXAttr(String src, XAttr xAttr, boolean toLogRpcIds) {
+ final SetXAttrOp op = SetXAttrOp.getInstance();
+ op.src = src;
+ op.xAttr = xAttr;
+ logRpcIds(op, toLogRpcIds);
+ logEdit(op);
+ }
+
+ void logRemoveXAttr(String src, XAttr xAttr) {
+ final RemoveXAttrOp op = RemoveXAttrOp.getInstance();
+ op.src = src;
+ op.xAttr = xAttr;
+ logEdit(op);
+ }
/**
* Get all the journals this edit log is currently operating on.
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1596873&r1=1596872&r2=1596873&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Thu May 22 13:54:53 2014
@@ -25,12 +25,14 @@ import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;
import java.util.EnumMap;
+import java.util.EnumSet;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -76,6 +78,8 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetPermissionsOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetQuotaOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetReplicationOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetXAttrOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveXAttrOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SymlinkOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp;
@@ -798,6 +802,20 @@ public class FSEditLogLoader {
fsDir.unprotectedSetAcl(setAclOp.src, setAclOp.aclEntries);
break;
}
+ case OP_SET_XATTR: {
+ SetXAttrOp setXAttrOp = (SetXAttrOp) op;
+ fsDir.unprotectedSetXAttr(setXAttrOp.src, setXAttrOp.xAttr,
+ EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
+ if (toAddRetryCache) {
+ fsNamesys.addCacheEntry(setXAttrOp.rpcClientId, setXAttrOp.rpcCallId);
+ }
+ break;
+ }
+ case OP_REMOVE_XATTR: {
+ RemoveXAttrOp removeXAttrOp = (RemoveXAttrOp) op;
+ fsDir.unprotectedRemoveXAttr(removeXAttrOp.src, removeXAttrOp.xAttr);
+ break;
+ }
default:
throw new IOException("Invalid operation read " + op.opCode);
}