Posted to hdfs-commits@hadoop.apache.org by wa...@apache.org on 2014/07/07 22:44:09 UTC

svn commit: r1608603 [1/6] - in /hadoop/common/branches/fs-encryption/hadoop-hdfs-project: hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/ hadoop-...

Author: wang
Date: Mon Jul  7 20:43:56 2014
New Revision: 1608603

URL: http://svn.apache.org/r1608603
Log:
Merge trunk to branch.

Added:
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/EnumSetParam.java
      - copied unchanged from r1608600, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/EnumSetParam.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerNoXAttrs.java
      - copied unchanged from r1608600, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerNoXAttrs.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3Utils.java
      - copied unchanged from r1608600, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3Utils.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java
      - copied unchanged from r1608600, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/
      - copied from r1608600, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetadataVersionOutput.java
      - copied unchanged from r1608600, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetadataVersionOutput.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
      - copied unchanged from r1608600, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/contract/
      - copied from r1608600, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/contract/
Removed:
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFullPathNameWithSnapshot.java
Modified:
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSUtils.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Parameters.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ParametersProvider.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReport.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenIdentifier.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsSnapshots.xml
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithXAttr.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml

Propchange: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs:r1606534
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1603979-1608600

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml Mon Jul  7 20:43:56 2014
@@ -112,6 +112,10 @@
           <artifactId>servlet-api</artifactId>
         </exclusion>
         <exclusion>
+          <groupId>javax.servlet.jsp</groupId>
+          <artifactId>jsp-api</artifactId>
+        </exclusion>
+        <exclusion>
           <groupId>org.mortbay.jetty</groupId>
           <artifactId>jetty</artifactId>
         </exclusion>

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java Mon Jul  7 20:43:56 2014
@@ -18,6 +18,7 @@
 package org.apache.hadoop.fs.http.client;
 
 import java.util.ArrayList;
+import java.util.EnumSet;
 import java.util.List;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -31,10 +32,13 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PositionedReadable;
 import org.apache.hadoop.fs.Seekable;
+import org.apache.hadoop.fs.XAttrCodec;
+import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.lib.wsrs.EnumSetParam;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
@@ -46,6 +50,12 @@ import org.apache.hadoop.util.Reflection
 import org.apache.hadoop.util.StringUtils;
 import org.json.simple.JSONArray;
 import org.json.simple.JSONObject;
+import org.json.simple.parser.JSONParser;
+import org.json.simple.parser.ParseException;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
 
 import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
@@ -96,6 +106,10 @@ public class HttpFSFileSystem extends Fi
   public static final String GROUP_PARAM = "group";
   public static final String MODIFICATION_TIME_PARAM = "modificationtime";
   public static final String ACCESS_TIME_PARAM = "accesstime";
+  public static final String XATTR_NAME_PARAM = "xattr.name";
+  public static final String XATTR_VALUE_PARAM = "xattr.value";
+  public static final String XATTR_SET_FLAG_PARAM = "flag";
+  public static final String XATTR_ENCODING_PARAM = "encoding";
 
   public static final Short DEFAULT_PERMISSION = 0755;
   public static final String ACLSPEC_DEFAULT = "";
@@ -142,6 +156,10 @@ public class HttpFSFileSystem extends Fi
   public static final String MODIFICATION_TIME_JSON = "modificationTime";
   public static final String BLOCK_SIZE_JSON = "blockSize";
   public static final String REPLICATION_JSON = "replication";
+  public static final String XATTRS_JSON = "XAttrs";
+  public static final String XATTR_NAME_JSON = "name";
+  public static final String XATTR_VALUE_JSON = "value";
+  public static final String XATTRNAMES_JSON = "XAttrNames";
 
   public static final String FILE_CHECKSUM_JSON = "FileChecksum";
   public static final String CHECKSUM_ALGORITHM_JSON = "algorithm";
@@ -184,7 +202,8 @@ public class HttpFSFileSystem extends Fi
     SETPERMISSION(HTTP_PUT), SETREPLICATION(HTTP_PUT), SETTIMES(HTTP_PUT),
     MODIFYACLENTRIES(HTTP_PUT), REMOVEACLENTRIES(HTTP_PUT),
     REMOVEDEFAULTACL(HTTP_PUT), REMOVEACL(HTTP_PUT), SETACL(HTTP_PUT),
-    DELETE(HTTP_DELETE);
+    DELETE(HTTP_DELETE), SETXATTR(HTTP_PUT), GETXATTRS(HTTP_GET),
+    REMOVEXATTR(HTTP_PUT), LISTXATTRS(HTTP_GET);
 
     private String httpMethod;
 
@@ -247,6 +266,31 @@ public class HttpFSFileSystem extends Fi
   private HttpURLConnection getConnection(final String method,
       Map<String, String> params, Path path, boolean makeQualified)
       throws IOException {
+    return getConnection(method, params, null, path, makeQualified);
+  }
+
+  /**
+   * Convenience method that creates a <code>HttpURLConnection</code> for the
+   * HttpFSServer file system operations.
+   * <p/>
+   * This method performs and injects any needed authentication credentials
+   * via the {@link #getConnection(URL, String)} method
+   *
+   * @param method the HTTP method.
+   * @param params the query string parameters.
+   * @param multiValuedParams multi-valued parameters of the query string
+   * @param path the file path
+   * @param makeQualified if the path should be 'makeQualified'
+   *
+   * @return HttpURLConnection a <code>HttpURLConnection</code> for the
+   *         HttpFSServer server, authenticated and ready to use for the
+   *         specified path and file system operation.
+   *
+   * @throws IOException thrown if an IO error occurs.
+   */
+  private HttpURLConnection getConnection(final String method,
+      Map<String, String> params, Map<String, List<String>> multiValuedParams,
+      Path path, boolean makeQualified) throws IOException {
     if (!realUser.getShortUserName().equals(doAs)) {
       params.put(DO_AS_PARAM, doAs);
     }
@@ -254,7 +298,7 @@ public class HttpFSFileSystem extends Fi
     if (makeQualified) {
       path = makeQualified(path);
     }
-    final URL url = HttpFSUtils.createURL(path, params);
+    final URL url = HttpFSUtils.createURL(path, params, multiValuedParams);
     return doAsRealUserIfNecessary(new Callable<HttpURLConnection>() {
       @Override
       public HttpURLConnection call() throws Exception {
@@ -585,7 +629,6 @@ public class HttpFSFileSystem extends Fi
    *
    * @deprecated Use delete(Path, boolean) instead
    */
-  @SuppressWarnings({"deprecation"})
   @Deprecated
   @Override
   public boolean delete(Path f) throws IOException {
@@ -1050,4 +1093,112 @@ public class HttpFSFileSystem extends Fi
     delegationToken = token;
   }
 
+  @Override
+  public void setXAttr(Path f, String name, byte[] value,
+      EnumSet<XAttrSetFlag> flag) throws IOException {
+    Map<String, String> params = new HashMap<String, String>();
+    params.put(OP_PARAM, Operation.SETXATTR.toString());
+    params.put(XATTR_NAME_PARAM, name);
+    if (value != null) {
+      params.put(XATTR_VALUE_PARAM, 
+          XAttrCodec.encodeValue(value, XAttrCodec.HEX));
+    }
+    params.put(XATTR_SET_FLAG_PARAM, EnumSetParam.toString(flag));
+    HttpURLConnection conn = getConnection(Operation.SETXATTR.getMethod(),
+        params, f, true);
+    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+  }
+
+  @Override
+  public byte[] getXAttr(Path f, String name) throws IOException {
+    Map<String, String> params = new HashMap<String, String>();
+    params.put(OP_PARAM, Operation.GETXATTRS.toString());
+    params.put(XATTR_NAME_PARAM, name);
+    HttpURLConnection conn = getConnection(Operation.GETXATTRS.getMethod(),
+        params, f, true);
+    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
+    Map<String, byte[]> xAttrs = createXAttrMap(
+        (JSONArray) json.get(XATTRS_JSON));
+    return xAttrs != null ? xAttrs.get(name) : null;
+  }
+
+  /** Convert xAttrs JSON to an xAttrs map */
+  private Map<String, byte[]> createXAttrMap(JSONArray jsonArray) 
+      throws IOException {
+    Map<String, byte[]> xAttrs = Maps.newHashMap();
+    for (Object obj : jsonArray) {
+      JSONObject jsonObj = (JSONObject) obj;
+      final String name = (String)jsonObj.get(XATTR_NAME_JSON);
+      final byte[] value = XAttrCodec.decodeValue(
+          (String)jsonObj.get(XATTR_VALUE_JSON));
+      xAttrs.put(name, value);
+    }
+
+    return xAttrs;
+  }
+
+  /** Convert xAttr names JSON to a names list */
+  private List<String> createXAttrNames(String xattrNamesStr) throws IOException {
+    JSONParser parser = new JSONParser();
+    JSONArray jsonArray;
+    try {
+      jsonArray = (JSONArray)parser.parse(xattrNamesStr);
+      List<String> names = Lists.newArrayListWithCapacity(jsonArray.size());
+      for (Object name : jsonArray) {
+        names.add((String) name);
+      }
+      return names;
+    } catch (ParseException e) {
+      throw new IOException("JSON parser error, " + e.getMessage(), e);
+    }
+  }
+
+  @Override
+  public Map<String, byte[]> getXAttrs(Path f) throws IOException {
+    Map<String, String> params = new HashMap<String, String>();
+    params.put(OP_PARAM, Operation.GETXATTRS.toString());
+    HttpURLConnection conn = getConnection(Operation.GETXATTRS.getMethod(),
+        params, f, true);
+    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
+    return createXAttrMap((JSONArray) json.get(XATTRS_JSON));
+  }
+
+  @Override
+  public Map<String, byte[]> getXAttrs(Path f, List<String> names)
+      throws IOException {
+    Preconditions.checkArgument(names != null && !names.isEmpty(), 
+        "XAttr names cannot be null or empty.");
+    Map<String, String> params = new HashMap<String, String>();
+    params.put(OP_PARAM, Operation.GETXATTRS.toString());
+    Map<String, List<String>> multiValuedParams = Maps.newHashMap();
+    multiValuedParams.put(XATTR_NAME_PARAM, names);
+    HttpURLConnection conn = getConnection(Operation.GETXATTRS.getMethod(),
+        params, multiValuedParams, f, true);
+    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
+    return createXAttrMap((JSONArray) json.get(XATTRS_JSON));
+  }
+
+  @Override
+  public List<String> listXAttrs(Path f) throws IOException {
+    Map<String, String> params = new HashMap<String, String>();
+    params.put(OP_PARAM, Operation.LISTXATTRS.toString());
+    HttpURLConnection conn = getConnection(Operation.LISTXATTRS.getMethod(),
+        params, f, true);
+    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
+    return createXAttrNames((String) json.get(XATTRNAMES_JSON));
+  }
+
+  @Override
+  public void removeXAttr(Path f, String name) throws IOException {
+    Map<String, String> params = new HashMap<String, String>();
+    params.put(OP_PARAM, Operation.REMOVEXATTR.toString());
+    params.put(XATTR_NAME_PARAM, name);
+    HttpURLConnection conn = getConnection(Operation.REMOVEXATTR.getMethod(),
+        params, f, true);
+    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+  }
 }
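
For context, the new client-side methods follow the standard Hadoop FileSystem xattr contract. Below is a minimal usage sketch, not part of this commit, with a hypothetical HttpFS endpoint, path, and attribute name; it assumes the configuration binds the webhdfs scheme to HttpFSFileSystem, as the HttpFS test harness does:

    import java.net.URI;
    import java.util.EnumSet;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.XAttrSetFlag;

    public class XAttrClientSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical endpoint; HttpFS conventionally listens on port 14000.
        FileSystem fs = FileSystem.get(
            URI.create("webhdfs://httpfs-host:14000"), new Configuration());
        Path path = new Path("/tmp/foo.txt");
        // Issues PUT ?op=SETXATTR&xattr.name=user.a1&xattr.value=0x313233&flag=CREATE
        fs.setXAttr(path, "user.a1", new byte[]{0x31, 0x32, 0x33},
            EnumSet.of(XAttrSetFlag.CREATE));
        byte[] value = fs.getXAttr(path, "user.a1");    // GET ?op=GETXATTRS
        System.out.println(new String(value, "UTF-8")); // prints "123"
        fs.removeXAttr(path, "user.a1");                // PUT ?op=REMOVEXATTR
        fs.close();
      }
    }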

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSUtils.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSUtils.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSUtils.java Mon Jul  7 20:43:56 2014
@@ -31,6 +31,7 @@ import java.net.URI;
 import java.net.URL;
 import java.net.URLEncoder;
 import java.text.MessageFormat;
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -59,6 +60,24 @@ public class HttpFSUtils {
    */
   static URL createURL(Path path, Map<String, String> params)
     throws IOException {
+    return createURL(path, params, null);
+  }
+
+  /**
+   * Convenience method that creates an HTTP <code>URL</code> for the
+   * HttpFSServer file system operations.
+   * <p/>
+   *
+   * @param path the file path.
+   * @param params the query string parameters.
+   * @param multiValuedParams multi-valued parameters of the query string
+   *
+   * @return URL a <code>URL</code> for the HttpFSServer server.
+   *
+   * @throws IOException thrown if an IO error occurs.
+   */
+  static URL createURL(Path path, Map<String, String> params, Map<String, 
+      List<String>> multiValuedParams) throws IOException {
     URI uri = path.toUri();
     String realScheme;
     if (uri.getScheme().equalsIgnoreCase(HttpFSFileSystem.SCHEME)) {
@@ -81,6 +100,18 @@ public class HttpFSUtils {
         append(URLEncoder.encode(entry.getValue(), "UTF8"));
       separator = "&";
     }
+    if (multiValuedParams != null) {
+      for (Map.Entry<String, List<String>> multiValuedEntry : 
+        multiValuedParams.entrySet()) {
+        String name = URLEncoder.encode(multiValuedEntry.getKey(), "UTF8");
+        List<String> values = multiValuedEntry.getValue();
+        for (String value : values) {
+          sb.append(separator).append(name).append("=").
+            append(URLEncoder.encode(value, "UTF8"));
+          separator = "&";
+        }
+      }
+    }
     return new URL(sb.toString());
   }
 
@@ -96,7 +127,7 @@ public class HttpFSUtils {
    * @throws IOException thrown if the current status code does not match the
    * expected one.
    */
-  @SuppressWarnings({"unchecked", "deprecation"})
+  @SuppressWarnings({"unchecked"})
   static void validateResponse(HttpURLConnection conn, int expected)
     throws IOException {
     int status = conn.getResponseCode();
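
The practical effect of the createURL change is that each entry of a multi-valued parameter is serialized as its own key=value pair, so a client-side getXAttrs(path, names) call produces a query string along these lines (hypothetical host, path, and names):

    http://httpfs-host:14000/webhdfs/v1/tmp/foo.txt?op=GETXATTRS&xattr.name=user.a1&xattr.name=user.a2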

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java Mon Jul  7 20:43:56 2014
@@ -25,6 +25,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.GlobFilter;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.XAttrCodec;
+import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
@@ -38,9 +40,11 @@ import org.json.simple.JSONObject;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.util.EnumSet;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 
 /**
  * FileSystem operation executors used by {@link HttpFSServer}.
@@ -236,6 +240,50 @@ public class FSOperations {
   }
 
   /**
+   * Converts xAttrs to a JSON object.
+   *
+   * @param xAttrs file xAttrs.
+   * @param encoding format of xattr values.
+   *
+   * @return The JSON representation of the xAttrs.
+   * @throws IOException 
+   */
+  @SuppressWarnings({"unchecked", "rawtypes"})
+  private static Map xAttrsToJSON(Map<String, byte[]> xAttrs, 
+      XAttrCodec encoding) throws IOException {
+    Map jsonMap = new LinkedHashMap();
+    JSONArray jsonArray = new JSONArray();
+    if (xAttrs != null) {
+      for (Entry<String, byte[]> e : xAttrs.entrySet()) {
+        Map json = new LinkedHashMap();
+        json.put(HttpFSFileSystem.XATTR_NAME_JSON, e.getKey());
+        if (e.getValue() != null) {
+          json.put(HttpFSFileSystem.XATTR_VALUE_JSON, 
+              XAttrCodec.encodeValue(e.getValue(), encoding));
+        }
+        jsonArray.add(json);
+      }
+    }
+    jsonMap.put(HttpFSFileSystem.XATTRS_JSON, jsonArray);
+    return jsonMap;
+  }
+
+  /**
+   * Converts xAttr names to a JSON object.
+   *
+   * @param names file xAttr names.
+   *
+   * @return The JSON representation of the xAttr names.
+   * @throws IOException 
+   */
+  @SuppressWarnings({"unchecked", "rawtypes"})
+  private static Map xAttrNamesToJSON(List<String> names) throws IOException {
+    Map jsonMap = new LinkedHashMap();
+    jsonMap.put(HttpFSFileSystem.XATTRNAMES_JSON, JSONArray.toJSONString(names));
+    return jsonMap;
+  }
+
+  /**
    * Converts a <code>ContentSummary</code> object into a JSON array
    * object.
    *
@@ -1099,4 +1147,132 @@ public class FSOperations {
 
   }
 
+  /**
+   * Executor that performs a setxattr FileSystemAccess file system operation.
+   */
+  @InterfaceAudience.Private
+  public static class FSSetXAttr implements 
+      FileSystemAccess.FileSystemExecutor<Void> {
+
+    private Path path;
+    private String name;
+    private byte[] value;
+    private EnumSet<XAttrSetFlag> flag;
+
+    public FSSetXAttr(String path, String name, String encodedValue, 
+        EnumSet<XAttrSetFlag> flag) throws IOException {
+      this.path = new Path(path);
+      this.name = name;
+      this.value = XAttrCodec.decodeValue(encodedValue);
+      this.flag = flag;
+    }
+
+    @Override
+    public Void execute(FileSystem fs) throws IOException {
+      fs.setXAttr(path, name, value, flag);
+      return null;
+    }
+  }
+
+  /**
+   * Executor that performs a removexattr FileSystemAccess file system
+   * operation.
+   */
+  @InterfaceAudience.Private
+  public static class FSRemoveXAttr implements 
+      FileSystemAccess.FileSystemExecutor<Void> {
+
+    private Path path;
+    private String name;
+
+    public FSRemoveXAttr(String path, String name) {
+      this.path = new Path(path);
+      this.name = name;
+    }
+
+    @Override
+    public Void execute(FileSystem fs) throws IOException {
+      fs.removeXAttr(path, name);
+      return null;
+    }
+  }
+
+  /**
+   * Executor that performs listing xattrs FileSystemAccess file system
+   * operation.
+   */
+  @SuppressWarnings("rawtypes")
+  @InterfaceAudience.Private
+  public static class FSListXAttrs implements 
+      FileSystemAccess.FileSystemExecutor<Map> {
+    private Path path;
+
+    /**
+     * Creates listing xattrs executor.
+     *
+     * @param path the path to retrieve the xattrs.
+     */
+    public FSListXAttrs(String path) {
+      this.path = new Path(path);
+    }
+
+    /**
+     * Executes the filesystem operation.
+     *
+     * @param fs filesystem instance to use.
+     *
+     * @return Map a map object (JSON friendly) with the xattr names.
+     *
+     * @throws IOException thrown if an IO error occurred.
+     */
+    @Override
+    public Map execute(FileSystem fs) throws IOException {
+      List<String> names = fs.listXAttrs(path);
+      return xAttrNamesToJSON(names);
+    }
+  }
+
+  /**
+   * Executor that performs getting xattrs FileSystemAccess file system
+   * operation.
+   */
+  @SuppressWarnings("rawtypes")
+  @InterfaceAudience.Private
+  public static class FSGetXAttrs implements 
+      FileSystemAccess.FileSystemExecutor<Map> {
+    private Path path;
+    private List<String> names;
+    private XAttrCodec encoding;
+
+    /**
+     * Creates getting xattrs executor.
+     *
+     * @param path the path to retrieve the xattrs.
+     */
+    public FSGetXAttrs(String path, List<String> names, XAttrCodec encoding) {
+      this.path = new Path(path);
+      this.names = names;
+      this.encoding = encoding;
+    }
+
+    /**
+     * Executes the filesystem operation.
+     *
+     * @param fs filesystem instance to use.
+     *
+     * @return Map a map object (JSON friendly) with the xattrs.
+     *
+     * @throws IOException thrown if an IO error occurred.
+     */
+    @Override
+    public Map execute(FileSystem fs) throws IOException {
+      Map<String, byte[]> xattrs = null;
+      if (names != null && !names.isEmpty()) {
+        xattrs = fs.getXAttrs(path, names);
+      } else {
+        xattrs = fs.getXAttrs(path);
+      }
+      return xAttrsToJSON(xattrs, encoding);
+    }
+  }
 }
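
For reference, the two JSON shapes these helpers emit look roughly as follows (illustrative names, hex value encoding). Note that xAttrNamesToJSON stores the name list as a single JSON-encoded string rather than a nested array, which is why the client's createXAttrNames re-parses that string with a JSONParser:

    {"XAttrs":[{"name":"user.a1","value":"0x313233"},{"name":"user.a3"}]}
    {"XAttrNames":"[\"user.a1\",\"user.a3\"]"}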

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java Mon Jul  7 20:43:56 2014
@@ -18,10 +18,13 @@
 package org.apache.hadoop.fs.http.server;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.XAttrCodec;
+import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
 import org.apache.hadoop.fs.http.client.HttpFSFileSystem.Operation;
 import org.apache.hadoop.lib.wsrs.BooleanParam;
 import org.apache.hadoop.lib.wsrs.EnumParam;
+import org.apache.hadoop.lib.wsrs.EnumSetParam;
 import org.apache.hadoop.lib.wsrs.LongParam;
 import org.apache.hadoop.lib.wsrs.Param;
 import org.apache.hadoop.lib.wsrs.ParametersProvider;
@@ -92,6 +95,15 @@ public class HttpFSParametersProvider ex
             new Class[]{DoAsParam.class, AclPermissionParam.class});
     PARAMS_DEF.put(Operation.REMOVEDEFAULTACL,
             new Class[]{DoAsParam.class});
+    PARAMS_DEF.put(Operation.SETXATTR,
+      new Class[]{DoAsParam.class, XAttrNameParam.class, XAttrValueParam.class, 
+                  XAttrSetFlagParam.class});
+    PARAMS_DEF.put(Operation.REMOVEXATTR, 
+      new Class[]{DoAsParam.class, XAttrNameParam.class});
+    PARAMS_DEF.put(Operation.GETXATTRS, 
+      new Class[]{DoAsParam.class, XAttrNameParam.class, XAttrEncodingParam.class});
+    PARAMS_DEF.put(Operation.LISTXATTRS,
+      new Class[]{DoAsParam.class});
   }
 
   public HttpFSParametersProvider() {
@@ -461,4 +473,79 @@ public class HttpFSParametersProvider ex
       super(NAME, null);
     }
   }
+  
+  /**
+   * Class for xattr parameter.
+   */
+  @InterfaceAudience.Private
+  public static class XAttrNameParam extends StringParam {
+    public static final String XATTR_NAME_REGX = 
+        "^(user\\.|trusted\\.|system\\.|security\\.).+";
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.XATTR_NAME_PARAM;
+    private static final Pattern pattern = Pattern.compile(XATTR_NAME_REGX);
+
+    /**
+     * Constructor.
+     */
+    public XAttrNameParam() {
+      super(NAME, null, pattern);
+    }
+  }
+
+  /**
+   * Class for xattr parameter.
+   */
+  @InterfaceAudience.Private
+  public static class XAttrValueParam extends StringParam {
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.XATTR_VALUE_PARAM;
+
+    /**
+     * Constructor.
+     */
+    public XAttrValueParam() {
+      super(NAME, null);
+    }
+  }
+
+  /**
+   * Class for xattr parameter.
+   */
+  @InterfaceAudience.Private
+  public static class XAttrSetFlagParam extends EnumSetParam<XAttrSetFlag> {
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.XATTR_SET_FLAG_PARAM;
+
+    /**
+     * Constructor.
+     */
+    public XAttrSetFlagParam() {
+      super(NAME, XAttrSetFlag.class, null);
+    }
+  }
+
+  /**
+   * Class for xattr parameter.
+   */
+  @InterfaceAudience.Private
+  public static class XAttrEncodingParam extends EnumParam<XAttrCodec> {
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.XATTR_ENCODING_PARAM;
+
+    /**
+     * Constructor.
+     */
+    public XAttrEncodingParam() {
+      super(NAME, XAttrCodec.class, null);
+    }
+  }
 }
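
Note that XAttrNameParam validates names against XATTR_NAME_REGX, so only the four recognized namespaces are accepted: for example, user.a1 and trusted.a1 parse, while a bare a1 is rejected during parameter parsing, before any file system call is made.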

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java Mon Jul  7 20:43:56 2014
@@ -21,6 +21,8 @@ package org.apache.hadoop.fs.http.server
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.XAttrCodec;
+import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.AccessTimeParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.AclPermissionParam;
@@ -40,6 +42,10 @@ import org.apache.hadoop.fs.http.server.
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.RecursiveParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ReplicationParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.SourcesParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.XAttrEncodingParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.XAttrNameParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.XAttrSetFlagParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.XAttrValueParam;
 import org.apache.hadoop.lib.service.FileSystemAccess;
 import org.apache.hadoop.lib.service.FileSystemAccessException;
 import org.apache.hadoop.lib.service.Groups;
@@ -75,6 +81,7 @@ import java.net.URI;
 import java.security.AccessControlException;
 import java.security.Principal;
 import java.text.MessageFormat;
+import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
 
@@ -322,6 +329,27 @@ public class HttpFSServer {
         response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
         break;
       }
+      case GETXATTRS: {
+        List<String> xattrNames = params.getValues(XAttrNameParam.NAME, 
+            XAttrNameParam.class);
+        XAttrCodec encoding = params.get(XAttrEncodingParam.NAME, 
+            XAttrEncodingParam.class);
+        FSOperations.FSGetXAttrs command = new FSOperations.FSGetXAttrs(path, 
+            xattrNames, encoding);
+        @SuppressWarnings("rawtypes")
+        Map json = fsExecute(user, doAs, command);
+        AUDIT_LOG.info("XAttrs for [{}]", path);
+        response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+        break;
+      }
+      case LISTXATTRS: {
+        FSOperations.FSListXAttrs command = new FSOperations.FSListXAttrs(path);
+        @SuppressWarnings("rawtypes")
+        Map json = fsExecute(user, doAs, command);
+        AUDIT_LOG.info("XAttr names for [{}]", path);
+        response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+        break;
+      }
       default: {
         throw new IOException(
           MessageFormat.format("Invalid HTTP GET operation [{0}]",
@@ -526,6 +554,30 @@ public class HttpFSServer {
         }
         break;
       }
+      case SETXATTR: {
+        String xattrName = params.get(XAttrNameParam.NAME, 
+            XAttrNameParam.class);
+        String xattrValue = params.get(XAttrValueParam.NAME, 
+            XAttrValueParam.class);
+        EnumSet<XAttrSetFlag> flag = params.get(XAttrSetFlagParam.NAME, 
+            XAttrSetFlagParam.class);
+
+        FSOperations.FSSetXAttr command = new FSOperations.FSSetXAttr(
+            path, xattrName, xattrValue, flag);
+        fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}] to xAttr [{}]", path, xattrName);
+        response = Response.ok().build();
+        break;
+      }
+      case REMOVEXATTR: {
+        String xattrName = params.get(XAttrNameParam.NAME, XAttrNameParam.class);
+        FSOperations.FSRemoveXAttr command = new FSOperations.FSRemoveXAttr(
+            path, xattrName);
+        fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}] removed xAttr [{}]", path, xattrName);
+        response = Response.ok().build();
+        break;
+      }
       case MKDIRS: {
         Short permission = params.get(PermissionParam.NAME,
                                        PermissionParam.class);
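
Taken together, the server now exposes four xattr operations over the HttpFS REST interface. In protocol terms the requests look roughly like this (illustrative path and names; other required parameters such as user.name are omitted):

    PUT  /webhdfs/v1/tmp/foo.txt?op=SETXATTR&xattr.name=user.a1&xattr.value=0x313233&flag=CREATE
    GET  /webhdfs/v1/tmp/foo.txt?op=GETXATTRS&xattr.name=user.a1&encoding=HEX
    GET  /webhdfs/v1/tmp/foo.txt?op=LISTXATTRS
    PUT  /webhdfs/v1/tmp/foo.txt?op=REMOVEXATTR&xattr.name=user.a1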

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Parameters.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Parameters.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Parameters.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Parameters.java Mon Jul  7 20:43:56 2014
@@ -19,6 +19,9 @@ package org.apache.hadoop.lib.wsrs;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 
+import com.google.common.collect.Lists;
+
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -28,14 +31,14 @@ import java.util.Map;
  */
 @InterfaceAudience.Private
 public class Parameters {
-  private Map<String, Param<?>> params;
+  private Map<String, List<Param<?>>> params;
 
   /**
    * Constructor that receives the request parsed parameters.
    *
    * @param params the request parsed parameters.
    */
-  public Parameters(Map<String, Param<?>> params) {
+  public Parameters(Map<String, List<Param<?>>> params) {
     this.params = params;
   }
 
@@ -44,11 +47,36 @@ public class Parameters {
    *
    * @param name parameter name.
    * @param klass class of the parameter, used for value casting.
-  * @return the value of the parameter.
+   * @return the value of the parameter.
    */
   @SuppressWarnings("unchecked")
   public <V, T extends Param<V>> V get(String name, Class<T> klass) {
-    return ((T)params.get(name)).value();
+    List<Param<?>> multiParams = (List<Param<?>>)params.get(name);
+    if (multiParams != null && multiParams.size() > 0) {
+      return ((T) multiParams.get(0)).value(); // Return the first value.
+    }
+    return null;
   }
   
+  /**
+   * Returns the values of a request parsed parameter.
+   *
+   * @param name parameter name.
+   * @param klass class of the parameter, used for value casting.
+   * @return List<V> the values of the parameter.
+   */
+  @SuppressWarnings("unchecked")
+  public <V, T extends Param<V>> List<V> getValues(String name, Class<T> klass) {
+    List<Param<?>> multiParams = (List<Param<?>>)params.get(name);
+    List<V> values = Lists.newArrayList();
+    if (multiParams != null) {
+      for (Param<?> param : multiParams) {
+        V value = ((T) param).value();
+        if (value != null) {
+          values.add(value);
+        }
+      }
+    }
+    return values;
+  }
 }
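
A short sketch of the resulting Parameters contract, as exercised by HttpFSServer (hypothetical request carrying two xattr.name values):

    // Request: ?op=GETXATTRS&xattr.name=user.a1&xattr.name=user.a2
    String first = params.get(XAttrNameParam.NAME, XAttrNameParam.class);
    //   -> "user.a1": get() keeps the old single-value behavior,
    //      returning the first parsed value, or null when absent.
    List<String> all = params.getValues(XAttrNameParam.NAME, XAttrNameParam.class);
    //   -> ["user.a1", "user.a2"]: getValues() returns every non-null
    //      value in request order.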

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ParametersProvider.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ParametersProvider.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ParametersProvider.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ParametersProvider.java Mon Jul  7 20:43:56 2014
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.lib.wsrs;
 
+import com.google.common.collect.Lists;
 import com.sun.jersey.api.core.HttpContext;
 import com.sun.jersey.core.spi.component.ComponentContext;
 import com.sun.jersey.core.spi.component.ComponentScope;
@@ -31,6 +32,7 @@ import javax.ws.rs.core.MultivaluedMap;
 import java.lang.reflect.Type;
 import java.text.MessageFormat;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -56,10 +58,11 @@ public class ParametersProvider
   @Override
   @SuppressWarnings("unchecked")
   public Parameters getValue(HttpContext httpContext) {
-    Map<String, Param<?>> map = new HashMap<String, Param<?>>();
-    MultivaluedMap<String, String> queryString =
+    Map<String, List<Param<?>>> map = new HashMap<String, List<Param<?>>>();
+    Map<String, List<String>> queryString =
       httpContext.getRequest().getQueryParameters();
-    String str = queryString.getFirst(driverParam);
+    String str = ((MultivaluedMap<String, String>) queryString).
+        getFirst(driverParam);
     if (str == null) {
       throw new IllegalArgumentException(
         MessageFormat.format("Missing Operation parameter [{0}]",
@@ -77,26 +80,40 @@ public class ParametersProvider
         MessageFormat.format("Unsupported Operation [{0}]", op));
     }
     for (Class<Param<?>> paramClass : paramsDef.get(op)) {
-      Param<?> param;
-      try {
-        param = paramClass.newInstance();
-      } catch (Exception ex) {
-        throw new UnsupportedOperationException(
-          MessageFormat.format(
-            "Param class [{0}] does not have default constructor",
-            paramClass.getName()));
+      Param<?> param = newParam(paramClass);
+      List<Param<?>> paramList = Lists.newArrayList();
+      List<String> ps = queryString.get(param.getName());
+      if (ps != null) {
+        for (String p : ps) {
+          try {
+            param.parseParam(p);
+          }
+          catch (Exception ex) {
+            throw new IllegalArgumentException(ex.toString(), ex);
+          }
+          paramList.add(param);
+          param = newParam(paramClass);
+        }
+      } else {
+        paramList.add(param);
       }
-      try {
-        param.parseParam(queryString.getFirst(param.getName()));
-      }
-      catch (Exception ex) {
-        throw new IllegalArgumentException(ex.toString(), ex);
-      }
-      map.put(param.getName(), param);
+
+      map.put(param.getName(), paramList);
     }
     return new Parameters(map);
   }
 
+  private Param<?> newParam(Class<Param<?>> paramClass) {
+    try {
+      return paramClass.newInstance();
+    } catch (Exception ex) {
+      throw new UnsupportedOperationException(
+        MessageFormat.format(
+          "Param class [{0}] does not have default constructor",
+          paramClass.getName()));
+    }
+  }
+
   @Override
   public ComponentScope getScope() {
     return ComponentScope.PerRequest;
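
The provider change is the counterpart on the parsing side: each repeated occurrence of a query-string key is parsed into its own freshly constructed Param instance, while a key absent from the request still contributes a single unparsed Param so that parameter defaults behave as before.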

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java Mon Jul  7 20:43:56 2014
@@ -47,6 +47,8 @@ import org.junit.runners.Parameterized;
 import org.mortbay.jetty.Server;
 import org.mortbay.jetty.webapp.WebAppContext;
 
+import com.google.common.collect.Lists;
+
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.FileWriter;
@@ -59,6 +61,8 @@ import java.net.URL;
 import java.security.PrivilegedExceptionAction;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.List;
+import java.util.Map;
 
 @RunWith(value = Parameterized.class)
 public abstract class BaseTestHttpFSWith extends HFSTestCase {
@@ -90,6 +94,7 @@ public abstract class BaseTestHttpFSWith
     Configuration conf = new Configuration(false);
     conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
     File hdfsSite = new File(new File(homeDir, "conf"), "hdfs-site.xml");
     OutputStream os = new FileOutputStream(hdfsSite);
     conf.writeXml(os);
@@ -481,6 +486,198 @@ public abstract class BaseTestHttpFSWith
     Assert.assertEquals(httpContentSummary.getSpaceConsumed(), hdfsContentSummary.getSpaceConsumed());
     Assert.assertEquals(httpContentSummary.getSpaceQuota(), hdfsContentSummary.getSpaceQuota());
   }
+  
+  /** Set xattr */
+  private void testSetXAttr() throws Exception {
+    if (!isLocalFS()) {
+      FileSystem fs = FileSystem.get(getProxiedFSConf());
+      fs.mkdirs(getProxiedFSTestDir());
+      Path path = new Path(getProxiedFSTestDir(), "foo.txt");
+      OutputStream os = fs.create(path);
+      os.write(1);
+      os.close();
+      fs.close();
+ 
+      final String name1 = "user.a1";
+      final byte[] value1 = new byte[]{0x31, 0x32, 0x33};
+      final String name2 = "user.a2";
+      final byte[] value2 = new byte[]{0x41, 0x42, 0x43};
+      final String name3 = "user.a3";
+      final byte[] value3 = null;
+      final String name4 = "trusted.a1";
+      final byte[] value4 = new byte[]{0x31, 0x32, 0x33};
+      final String name5 = "a1";
+      fs = getHttpFSFileSystem();
+      fs.setXAttr(path, name1, value1);
+      fs.setXAttr(path, name2, value2);
+      fs.setXAttr(path, name3, value3);
+      fs.setXAttr(path, name4, value4);
+      try {
+        fs.setXAttr(path, name5, value1);
+        Assert.fail("Set xAttr with incorrect name format should fail.");
+      } catch (IOException e) { // expected
+      } catch (IllegalArgumentException e) { // expected
+      }
+      fs.close();
+
+      fs = FileSystem.get(getProxiedFSConf());
+      Map<String, byte[]> xAttrs = fs.getXAttrs(path);
+      fs.close();
+      Assert.assertEquals(4, xAttrs.size());
+      Assert.assertArrayEquals(value1, xAttrs.get(name1));
+      Assert.assertArrayEquals(value2, xAttrs.get(name2));
+      Assert.assertArrayEquals(new byte[0], xAttrs.get(name3));
+      Assert.assertArrayEquals(value4, xAttrs.get(name4));
+    }
+  }
+
+  /** Get xattrs */
+  private void testGetXAttrs() throws Exception {
+    if (!isLocalFS()) {
+      FileSystem fs = FileSystem.get(getProxiedFSConf());
+      fs.mkdirs(getProxiedFSTestDir());
+      Path path = new Path(getProxiedFSTestDir(), "foo.txt");
+      OutputStream os = fs.create(path);
+      os.write(1);
+      os.close();
+      fs.close();
+
+      final String name1 = "user.a1";
+      final byte[] value1 = new byte[]{0x31, 0x32, 0x33};
+      final String name2 = "user.a2";
+      final byte[] value2 = new byte[]{0x41, 0x42, 0x43};
+      final String name3 = "user.a3";
+      final byte[] value3 = null;
+      final String name4 = "trusted.a1";
+      final byte[] value4 = new byte[]{0x31, 0x32, 0x33};
+      fs = FileSystem.get(getProxiedFSConf());
+      fs.setXAttr(path, name1, value1);
+      fs.setXAttr(path, name2, value2);
+      fs.setXAttr(path, name3, value3);
+      fs.setXAttr(path, name4, value4);
+      fs.close();
+
+      // Get xattrs with names parameter
+      fs = getHttpFSFileSystem();
+      List<String> names = Lists.newArrayList();
+      names.add(name1);
+      names.add(name2);
+      names.add(name3);
+      names.add(name4);
+      Map<String, byte[]> xAttrs = fs.getXAttrs(path, names);
+      fs.close();
+      Assert.assertEquals(4, xAttrs.size());
+      Assert.assertArrayEquals(value1, xAttrs.get(name1));
+      Assert.assertArrayEquals(value2, xAttrs.get(name2));
+      Assert.assertArrayEquals(new byte[0], xAttrs.get(name3));
+      Assert.assertArrayEquals(value4, xAttrs.get(name4));
+
+      // Get specific xattr
+      fs = getHttpFSFileSystem();
+      byte[] value = fs.getXAttr(path, name1);
+      Assert.assertArrayEquals(value1, value);
+      final String name5 = "a1";
+      try {
+        value = fs.getXAttr(path, name5);
+        Assert.fail("Get xAttr with incorrect name format should fail.");
+      } catch (IOException e) { // expected
+      } catch (IllegalArgumentException e) { // expected
+      }
+      fs.close();
+
+      // Get all xattrs
+      fs = getHttpFSFileSystem();
+      xAttrs = fs.getXAttrs(path);
+      fs.close();
+      Assert.assertEquals(4, xAttrs.size());
+      Assert.assertArrayEquals(value1, xAttrs.get(name1));
+      Assert.assertArrayEquals(value2, xAttrs.get(name2));
+      Assert.assertArrayEquals(new byte[0], xAttrs.get(name3));
+      Assert.assertArrayEquals(value4, xAttrs.get(name4));
+    }
+  }
+
+  /** Remove xattr */
+  private void testRemoveXAttr() throws Exception {
+    if (!isLocalFS()) {
+      FileSystem fs = FileSystem.get(getProxiedFSConf());
+      fs.mkdirs(getProxiedFSTestDir());
+      Path path = new Path(getProxiedFSTestDir(), "foo.txt");
+      OutputStream os = fs.create(path);
+      os.write(1);
+      os.close();
+      fs.close();
+
+      final String name1 = "user.a1";
+      final byte[] value1 = new byte[]{0x31, 0x32, 0x33};
+      final String name2 = "user.a2";
+      final byte[] value2 = new byte[]{0x41, 0x42, 0x43};
+      final String name3 = "user.a3";
+      final byte[] value3 = null;
+      final String name4 = "trusted.a1";
+      final byte[] value4 = new byte[]{0x31, 0x32, 0x33};
+      final String name5 = "a1";
+      fs = FileSystem.get(getProxiedFSConf());
+      fs.setXAttr(path, name1, value1);
+      fs.setXAttr(path, name2, value2);
+      fs.setXAttr(path, name3, value3);
+      fs.setXAttr(path, name4, value4);
+      fs.close();
+
+      fs = getHttpFSFileSystem();
+      fs.removeXAttr(path, name1);
+      fs.removeXAttr(path, name3);
+      fs.removeXAttr(path, name4);
+      try {
+        fs.removeXAttr(path, name5);
+        Assert.fail("Remove xAttr with incorrect name format should fail.");
+      } catch (IOException e) { // expected
+      } catch (IllegalArgumentException e) { // expected
+      }
+
+      fs = FileSystem.get(getProxiedFSConf());
+      Map<String, byte[]> xAttrs = fs.getXAttrs(path);
+      fs.close();
+      Assert.assertEquals(1, xAttrs.size());
+      Assert.assertArrayEquals(value2, xAttrs.get(name2));
+    }
+  }
+
+  /** List xattrs */
+  private void testListXAttrs() throws Exception {
+    if (!isLocalFS()) {
+      FileSystem fs = FileSystem.get(getProxiedFSConf());
+      fs.mkdirs(getProxiedFSTestDir());
+      Path path = new Path(getProxiedFSTestDir(), "foo.txt");
+      OutputStream os = fs.create(path);
+      os.write(1);
+      os.close();
+      fs.close();
+
+      final String name1 = "user.a1";
+      final byte[] value1 = new byte[]{0x31, 0x32, 0x33};
+      final String name2 = "user.a2";
+      final byte[] value2 = new byte[]{0x41, 0x42, 0x43};
+      final String name3 = "user.a3";
+      final byte[] value3 = null;
+      final String name4 = "trusted.a1";
+      final byte[] value4 = new byte[]{0x31, 0x32, 0x33};
+      fs = FileSystem.get(getProxiedFSConf());
+      fs.setXAttr(path, name1, value1);
+      fs.setXAttr(path, name2, value2);
+      fs.setXAttr(path, name3, value3);
+      fs.setXAttr(path, name4, value4);
+      fs.close();
+
+      fs = getHttpFSFileSystem();
+      List<String> names = fs.listXAttrs(path);
+      Assert.assertEquals(4, names.size());
+      Assert.assertTrue(names.contains(name1));
+      Assert.assertTrue(names.contains(name2));
+      Assert.assertTrue(names.contains(name3));
+      Assert.assertTrue(names.contains(name4));
+    }
+  }
 
   /**
    * Runs assertions testing that two AclStatus objects contain the same info
@@ -587,7 +784,7 @@ public abstract class BaseTestHttpFSWith
   protected enum Operation {
     GET, OPEN, CREATE, APPEND, CONCAT, RENAME, DELETE, LIST_STATUS, WORKING_DIRECTORY, MKDIRS,
     SET_TIMES, SET_PERMISSION, SET_OWNER, SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY,
-    FILEACLS, DIRACLS
+    FILEACLS, DIRACLS, SET_XATTR, GET_XATTRS, REMOVE_XATTR, LIST_XATTRS
   }
 
   private void operation(Operation op) throws Exception {
@@ -645,6 +842,18 @@ public abstract class BaseTestHttpFSWith
       case DIRACLS:
         testDirAcls();
         break;
+      case SET_XATTR:
+        testSetXAttr();
+        break;
+      case REMOVE_XATTR:
+        testRemoveXAttr();
+        break;
+      case GET_XATTRS:
+        testGetXAttrs();
+        break;
+      case LIST_XATTRS:
+        testListXAttrs();
+        break;
     }
   }
 

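The tests above cover the whole xattr surface that HttpFSFileSystem now exposes. Condensed into a single round trip (a sketch only: the fs.defaultFS URI and the path are placeholders, and names must carry a namespace prefix such as user. or trusted.):

    import java.util.List;
    import java.util.Map;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class XAttrRoundTrip {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:8020"); // placeholder
        FileSystem fs = FileSystem.get(conf);
        Path path = new Path("/tmp/foo.txt");
        fs.create(path).close();
        fs.setXAttr(path, "user.a1", new byte[]{0x31, 0x32, 0x33});
        byte[] value = fs.getXAttr(path, "user.a1");  // one attribute
        Map<String, byte[]> all = fs.getXAttrs(path); // all attributes
        List<String> names = fs.listXAttrs(path);     // names only
        fs.removeXAttr(path, "user.a1");
        fs.close();
      }
    }
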
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java Mon Jul  7 20:43:56 2014
@@ -36,11 +36,13 @@ import java.text.MessageFormat;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
 import org.apache.hadoop.lib.server.Service;
 import org.apache.hadoop.lib.server.ServiceException;
@@ -62,6 +64,8 @@ import org.junit.Test;
 import org.mortbay.jetty.Server;
 import org.mortbay.jetty.webapp.WebAppContext;
 
+import com.google.common.collect.Maps;
+
 public class TestHttpFSServer extends HFSTestCase {
 
   @Test
@@ -132,6 +136,7 @@ public class TestHttpFSServer extends HF
     Configuration conf = new Configuration(false);
     conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
     File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml");
     OutputStream os = new FileOutputStream(hdfsSite);
     conf.writeXml(os);
@@ -356,6 +361,36 @@ public class TestHttpFSServer extends HF
     }
     return entries;
   }
+  
+  /**
+   * Parse xAttrs from the JSON result of a GETXATTRS call and return them as a map.
+   * @param statusJson JSON from GETXATTRS
+   * @return map from xattr name to decoded value
+   * @throws Exception
+   */
+  private Map<String, byte[]> getXAttrs(String statusJson) throws Exception {
+    Map<String, byte[]> xAttrs = Maps.newHashMap();
+    JSONParser parser = new JSONParser();
+    JSONObject jsonObject = (JSONObject) parser.parse(statusJson);
+    JSONArray jsonXAttrs = (JSONArray) jsonObject.get("XAttrs");
+    if (jsonXAttrs != null) {
+      for (Object a : jsonXAttrs) {
+        String name = (String) ((JSONObject)a).get("name");
+        String value = (String) ((JSONObject)a).get("value");
+        xAttrs.put(name, decodeXAttrValue(value));
+      }
+    }
+    return xAttrs;
+  }
+  
+  /** Decode xattr value from string */
+  private byte[] decodeXAttrValue(String value) throws IOException {
+    if (value != null) {
+      return XAttrCodec.decodeValue(value);
+    } else {
+      return new byte[0];
+    }
+  }
 
   /**
    * Validate that files are created with 755 permissions when no
@@ -388,6 +423,60 @@ public class TestHttpFSServer extends HF
     statusJson = getStatus("/perm/p-321", "GETFILESTATUS");
     Assert.assertTrue("321".equals(getPerms(statusJson)));
   }
+  
+  /**
+   * Validate XAttr get/set/remove calls.
+   */
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testXAttrs() throws Exception {
+    final String name1 = "user.a1";
+    final byte[] value1 = new byte[]{0x31, 0x32, 0x33};
+    final String name2 = "user.a2";
+    final byte[] value2 = new byte[]{0x41, 0x42, 0x43};
+    final String dir = "/xattrTest";
+    final String path = dir + "/file";
+    
+    createHttpFSServer(false);
+    
+    FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    fs.mkdirs(new Path(dir));
+    
+    createWithHttp(path, null);
+    String statusJson = getStatus(path, "GETXATTRS");
+    Map<String, byte[]> xAttrs = getXAttrs(statusJson);
+    Assert.assertEquals(0, xAttrs.size());
+    
+    // Set two xattrs
+    putCmd(path, "SETXATTR", setXAttrParam(name1, value1));
+    putCmd(path, "SETXATTR", setXAttrParam(name2, value2));
+    statusJson = getStatus(path, "GETXATTRS");
+    xAttrs = getXAttrs(statusJson);
+    Assert.assertEquals(2, xAttrs.size());
+    Assert.assertArrayEquals(value1, xAttrs.get(name1));
+    Assert.assertArrayEquals(value2, xAttrs.get(name2));
+    
+    // Remove one xattr
+    putCmd(path, "REMOVEXATTR", "xattr.name=" + name1);
+    statusJson = getStatus(path, "GETXATTRS");
+    xAttrs = getXAttrs(statusJson);
+    Assert.assertEquals(1, xAttrs.size());
+    Assert.assertArrayEquals(value2, xAttrs.get(name2));
+    
+    // Remove another xattr, then there is no xattr
+    putCmd(path, "REMOVEXATTR", "xattr.name=" + name2);
+    statusJson = getStatus(path, "GETXATTRS");
+    xAttrs = getXAttrs(statusJson);
+    Assert.assertEquals(0, xAttrs.size());
+  }
+  
+  /** Params for setting an xAttr */
+  public static String setXAttrParam(String name, byte[] value) throws IOException {
+    return "xattr.name=" + name + "&xattr.value=" + XAttrCodec.encodeValue(
+        value, XAttrCodec.HEX) + "&encoding=hex&flag=create"; 
+  }
 
   /**
    * Validate the various ACL set/modify/remove calls.  General strategy is

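At the REST level, setXAttrParam() above shows the wire format: values travel hex-encoded through XAttrCodec. A small sketch of building the same query string; the GETXATTRS response shape in the comment is inferred from this test's getXAttrs() parser, not quoted from a spec:

    import org.apache.hadoop.fs.XAttrCodec;

    public class SetXAttrQuerySketch {
      public static void main(String[] args) throws Exception {
        byte[] value = new byte[]{0x31, 0x32, 0x33};
        String query = "xattr.name=user.a1"
            + "&xattr.value=" + XAttrCodec.encodeValue(value, XAttrCodec.HEX)
            + "&encoding=hex&flag=create";
        System.out.println(query);
        // xattr.name=user.a1&xattr.value=0x313233&encoding=hex&flag=create
        // A later GETXATTRS is parsed above from JSON shaped like:
        // {"XAttrs":[{"name":"user.a1","value":"0x313233"}]}
      }
    }
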
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java Mon Jul  7 20:43:56 2014
@@ -147,6 +147,7 @@ public class TestHdfsHelper extends Test
       conf.set("dfs.permissions", "true");
       conf.set("hadoop.security.authentication", "simple");
       conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+      conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
       MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
       builder.numDataNodes(2);
       MiniDFSCluster miniHdfs = builder.build();

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java Mon Jul  7 20:43:56 2014
@@ -55,4 +55,6 @@ public class NfsConfigKeys {
   public static final String DFS_NFS_PORT_MONITORING_DISABLED_KEY = "nfs.port.monitoring.disabled";
   public static final boolean DFS_NFS_PORT_MONITORING_DISABLED_DEFAULT = true;
 
-}
\ No newline at end of file
+  public static final String  AIX_COMPAT_MODE_KEY = "nfs.aix.compatibility.mode.enabled";
+  public static final boolean AIX_COMPAT_MODE_DEFAULT = false;
+}
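
The new key defaults to off, so existing gateways are unaffected; deployments serving AIX clients opt in by setting nfs.aix.compatibility.mode.enabled. Programmatically, that is simply (a sketch, mirroring how RpcProgramNfs3 reads the flag below):

    import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
    import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;

    public class EnableAixCompat {
      public static void main(String[] args) {
        NfsConfiguration conf = new NfsConfiguration();
        conf.setBoolean(NfsConfigKeys.AIX_COMPAT_MODE_KEY, true);
        System.out.println(conf.getBoolean(
            NfsConfigKeys.AIX_COMPAT_MODE_KEY,
            NfsConfigKeys.AIX_COMPAT_MODE_DEFAULT)); // true
      }
    }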

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java Mon Jul  7 20:43:56 2014
@@ -160,7 +160,7 @@ public class Nfs3Utils {
   }
 
   public static int getAccessRightsForUserGroup(int uid, int gid,
-      Nfs3FileAttributes attr) {
+      int[] auxGids, Nfs3FileAttributes attr) {
     int mode = attr.getMode();
     if (uid == attr.getUid()) {
       return getAccessRights(mode >> 6, attr.getType());
@@ -168,6 +168,14 @@ public class Nfs3Utils {
     if (gid == attr.getGid()) {
       return getAccessRights(mode >> 3, attr.getType());
     }
+    // Check for membership in auxiliary groups
+    if (auxGids != null) {
+      for (int auxGid : auxGids) {
+        if (attr.getGid() == auxGid) {
+          return getAccessRights(mode >> 3, attr.getType());
+        }
+      }
+    }
     return getAccessRights(mode, attr.getType());
   }
   
@@ -191,4 +199,4 @@ public class Nfs3Utils {
     data[7] = (byte) (v >>> 0);
     return data;
   }
-}
\ No newline at end of file
+}
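
The shifts in getAccessRightsForUserGroup() pick the rwx triplet for the owner (mode >> 6), for the group and now any auxiliary group (mode >> 3), or for everyone else (mode). A worked example for mode 0750; the & 7 masking here is only for display, since the real masking happens inside getAccessRights():

    public class ModeShiftExample {
      public static void main(String[] args) {
        int mode = 0750; // rwxr-x---
        System.out.println((mode >> 6) & 7); // 7: owner gets rwx
        System.out.println((mode >> 3) & 7); // 5: gid or any aux gid gets r-x
        System.out.println(mode & 7);        // 0: everyone else gets ---
      }
    }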

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java Mon Jul  7 20:43:56 2014
@@ -95,6 +95,7 @@ class OpenFileCtx {
    */
   private AtomicLong nextOffset;
   private final HdfsDataOutputStream fos;
+  private final boolean aixCompatMode;
   
   // It's updated after each sync to HDFS
   private Nfs3FileAttributes latestAttr;
@@ -199,8 +200,15 @@ class OpenFileCtx {
   
   OpenFileCtx(HdfsDataOutputStream fos, Nfs3FileAttributes latestAttr,
       String dumpFilePath, DFSClient client, IdUserGroup iug) {
+    this(fos, latestAttr, dumpFilePath, client, iug, false);
+  }
+  
+  OpenFileCtx(HdfsDataOutputStream fos, Nfs3FileAttributes latestAttr,
+      String dumpFilePath, DFSClient client, IdUserGroup iug,
+      boolean aixCompatMode) {
     this.fos = fos;
     this.latestAttr = latestAttr;
+    this.aixCompatMode = aixCompatMode;
     // We use the ReverseComparatorOnMin as the comparator of the map. In this
     // way, we first dump the data with larger offset. In the meanwhile, we
     // retrieve the last element to write back to HDFS.
@@ -780,15 +788,29 @@ class OpenFileCtx {
     }
 
     if (commitOffset > 0) {
-      if (commitOffset > flushed) {
-        if (!fromRead) {
-          CommitCtx commitCtx = new CommitCtx(commitOffset, channel, xid,
-              preOpAttr);
-          pendingCommits.put(commitOffset, commitCtx);
+      if (aixCompatMode) {
+        // The AIX NFS client misinterprets RFC-1813 and will always send 4096
+        // for the commitOffset even if fewer bytes than that have ever (or will
+        // ever) be sent by the client. So, if in AIX compatibility mode, we
+        // will always DO_SYNC if the bytes to commit have already all
+        // been flushed, else we will fall through to the logic below which
+        // checks for pending writes in the case that we're being asked to
+        // commit more bytes than have so far been flushed. See HDFS-6549 for
+        // more info.
+        if (commitOffset <= flushed) {
+          return COMMIT_STATUS.COMMIT_DO_SYNC;
         }
-        return COMMIT_STATUS.COMMIT_WAIT;
       } else {
-        return COMMIT_STATUS.COMMIT_DO_SYNC;
+        if (commitOffset > flushed) {
+          if (!fromRead) {
+            CommitCtx commitCtx = new CommitCtx(commitOffset, channel, xid,
+                preOpAttr);
+            pendingCommits.put(commitOffset, commitCtx);
+          }
+          return COMMIT_STATUS.COMMIT_WAIT;
+        } else {
+          return COMMIT_STATUS.COMMIT_DO_SYNC;
+        } 
       }
     }
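
Distilled, the commit decision introduced above looks like this (a sketch: flushed corresponds to fos.getPos(), and FALL_THROUGH is a hypothetical stand-in for the pending-writes check that follows this block in checkCommitInternal()):

    enum CommitStatus { COMMIT_DO_SYNC, COMMIT_WAIT, FALL_THROUGH }

    public class CommitDecisionSketch {
      static CommitStatus decide(long commitOffset, long flushed,
          boolean aixCompatMode) {
        if (commitOffset > 0) {
          if (aixCompatMode) {
            if (commitOffset <= flushed) {
              return CommitStatus.COMMIT_DO_SYNC; // AIX may commit 4096 it never wrote
            }
            // otherwise fall through to the pending-writes check (HDFS-6549)
          } else {
            return commitOffset > flushed
                ? CommitStatus.COMMIT_WAIT   // real code queues a CommitCtx if !fromRead
                : CommitStatus.COMMIT_DO_SYNC;
          }
        }
        return CommitStatus.FALL_THROUGH;
      }

      public static void main(String[] args) {
        System.out.println(decide(4096, 2, true));    // FALL_THROUGH
        System.out.println(decide(4096, 4096, true)); // COMMIT_DO_SYNC
        System.out.println(decide(5, 2, false));      // COMMIT_WAIT
      }
    }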
 

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java Mon Jul  7 20:43:56 2014
@@ -153,6 +153,7 @@ public class RpcProgramNfs3 extends RpcP
   private final short replication;
   private final long blockSize;
   private final int bufferSize;
+  private final boolean aixCompatMode;
   private Statistics statistics;
   private String writeDumpDir; // The dir save dump files
   
@@ -170,8 +171,11 @@ public class RpcProgramNfs3 extends RpcP
     config.set(FsPermission.UMASK_LABEL, "000");
     iug = new IdUserGroup(config);
     
+    aixCompatMode = config.getBoolean(
+        NfsConfigKeys.AIX_COMPAT_MODE_KEY,
+        NfsConfigKeys.AIX_COMPAT_MODE_DEFAULT);
     exports = NfsExports.getInstance(config);
-    writeManager = new WriteManager(iug, config);
+    writeManager = new WriteManager(iug, config, aixCompatMode);
     clientCache = new DFSClientCache(config);
     replication = (short) config.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
         DFSConfigKeys.DFS_REPLICATION_DEFAULT);
@@ -500,7 +504,8 @@ public class RpcProgramNfs3 extends RpcP
         return new ACCESS3Response(Nfs3Status.NFS3ERR_STALE);
       }
       int access = Nfs3Utils.getAccessRightsForUserGroup(
-          securityHandler.getUid(), securityHandler.getGid(), attrs);
+          securityHandler.getUid(), securityHandler.getGid(),
+          securityHandler.getAuxGids(), attrs);
       
       return new ACCESS3Response(Nfs3Status.NFS3_OK, attrs, access);
     } catch (RemoteException r) {
@@ -655,7 +660,8 @@ public class RpcProgramNfs3 extends RpcP
         return new READ3Response(Nfs3Status.NFS3ERR_NOENT);
       }
       int access = Nfs3Utils.getAccessRightsForUserGroup(
-          securityHandler.getUid(), securityHandler.getGid(), attrs);
+          securityHandler.getUid(), securityHandler.getGid(),
+          securityHandler.getAuxGids(), attrs);
       if ((access & Nfs3Constant.ACCESS3_READ) != 0) {
         eof = offset < attrs.getSize() ? false : true;
         return new READ3Response(Nfs3Status.NFS3_OK, attrs, 0, eof,
@@ -900,7 +906,8 @@ public class RpcProgramNfs3 extends RpcP
       
       // Add open stream
       OpenFileCtx openFileCtx = new OpenFileCtx(fos, postOpObjAttr,
-          writeDumpDir + "/" + postOpObjAttr.getFileId(), dfsClient, iug);
+          writeDumpDir + "/" + postOpObjAttr.getFileId(), dfsClient, iug,
+          aixCompatMode);
       fileHandle = new FileHandle(postOpObjAttr.getFileId());
       if (!writeManager.addOpenFileStream(fileHandle, openFileCtx)) {
         LOG.warn("Can't add more stream, close it."
@@ -1438,9 +1445,24 @@ public class RpcProgramNfs3 extends RpcP
       }
       long cookieVerf = request.getCookieVerf();
       if ((cookieVerf != 0) && (cookieVerf != dirStatus.getModificationTime())) {
-        LOG.error("CookierVerf mismatch. request cookierVerf:" + cookieVerf
-            + " dir cookieVerf:" + dirStatus.getModificationTime());
-        return new READDIR3Response(Nfs3Status.NFS3ERR_BAD_COOKIE);
+        if (aixCompatMode) {
+          // The AIX NFS client misinterprets RFC-1813 and will repeatedly send
+          // the same cookieverf value even across VFS-level readdir calls,
+          // instead of getting a new cookieverf for every VFS-level readdir
+          // call, and reusing the cookieverf only in the event that multiple
+          // incremental NFS-level readdir calls must be made to fetch all of
+          // the directory entries. This means that whenever a readdir call is
+          // made by an AIX NFS client for a given directory, and that directory
+          // is subsequently modified, thus changing its mtime, no later readdir
+          // calls will succeed from AIX for that directory until the FS is
+          // unmounted/remounted. See HDFS-6549 for more info.
+          LOG.warn("AIX compatibility mode enabled, ignoring cookieverf " +
+              "mismatches.");
+        } else {
+          LOG.error("CookieVerf mismatch. request cookieVerf: " + cookieVerf
+              + " dir cookieVerf: " + dirStatus.getModificationTime());
+          return new READDIR3Response(Nfs3Status.NFS3ERR_BAD_COOKIE);
+        }
       }
 
       if (cookie == 0) {
@@ -1588,9 +1610,22 @@ public class RpcProgramNfs3 extends RpcP
       }
       long cookieVerf = request.getCookieVerf();
       if ((cookieVerf != 0) && (cookieVerf != dirStatus.getModificationTime())) {
-        LOG.error("CookierVerf mismatch. request cookierVerf:" + cookieVerf
-            + " dir cookieVerf:" + dirStatus.getModificationTime());
-        return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_BAD_COOKIE);
+        if (aixCompatMode) {
+          // The AIX NFS client misinterprets RFC-1813 and will repeatedly send
+          // the same cookieverf value even across VFS-level readdir calls,
+          // instead of getting a new cookieverf for every VFS-level readdir
+          // call. This means that whenever a readdir call is made by an AIX NFS
+          // client for a given directory, and that directory is subsequently
+          // modified, thus changing its mtime, no later readdir calls will
+          // succeed for that directory from AIX until the FS is
+          // unmounted/remounted. See HDFS-6549 for more info.
+          LOG.warn("AIX compatibility mode enabled, ignoring cookieverf " +
+              "mismatches.");
+        } else {
+          LOG.error("cookieverf mismatch. request cookieverf: " + cookieVerf
+              + " dir cookieverf: " + dirStatus.getModificationTime());
+          return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_BAD_COOKIE);
+        }
       }
 
       if (cookie == 0) {

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java Mon Jul  7 20:43:56 2014
@@ -58,6 +58,7 @@ public class WriteManager {
   private boolean asyncDataServiceStarted = false;
 
   private final int maxStreams;
+  private final boolean aixCompatMode;
 
   /**
    * The time limit to wait for accumulate reordered sequential writes to the
@@ -79,9 +80,11 @@ public class WriteManager {
     return fileContextCache.put(h, ctx);
   }
   
-  WriteManager(IdUserGroup iug, final NfsConfiguration config) {
+  WriteManager(IdUserGroup iug, final NfsConfiguration config,
+      boolean aixCompatMode) {
     this.iug = iug;
     this.config = config;
+    this.aixCompatMode = aixCompatMode;
     streamTimeout = config.getLong(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_KEY,
         NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_DEFAULT);
     LOG.info("Stream timeout is " + streamTimeout + "ms.");
@@ -175,7 +178,7 @@ public class WriteManager {
       String writeDumpDir = config.get(NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_KEY,
           NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_DEFAULT);
       openFileCtx = new OpenFileCtx(fos, latestAttr, writeDumpDir + "/"
-          + fileHandle.getFileId(), dfsClient, iug);
+          + fileHandle.getFileId(), dfsClient, iug, aixCompatMode);
 
       if (!addOpenFileStream(fileHandle, openFileCtx)) {
         LOG.info("Can't add new stream. Close it. Tell client to retry.");

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java Mon Jul  7 20:43:56 2014
@@ -190,6 +190,29 @@ public class TestWrites {
     ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
     Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED);
   }
+  
+  @Test
+  public void testCheckCommitAixCompatMode() throws IOException {
+    DFSClient dfsClient = Mockito.mock(DFSClient.class);
+    Nfs3FileAttributes attr = new Nfs3FileAttributes();
+    HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
+
+    // Last argument "true" here to enable AIX compatibility mode.
+    OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient,
+        new IdUserGroup(new NfsConfiguration()), true);
+    
+    // Test fall-through to pendingWrites check in the event that commitOffset
+    // is greater than the number of bytes we've so far flushed.
+    Mockito.when(fos.getPos()).thenReturn((long) 2);
+    COMMIT_STATUS status = ctx.checkCommitInternal(5, null, 1, attr, false);
+    Assert.assertTrue(status == COMMIT_STATUS.COMMIT_FINISHED);
+    
+    // Test the case when we actually have received more bytes than we're trying
+    // to commit.
+    Mockito.when(fos.getPos()).thenReturn((long) 10);
+    status = ctx.checkCommitInternal(5, null, 1, attr, false);
+    Assert.assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC);
+  }
 
   @Test
   // Validate all the commit check return codes OpenFileCtx.COMMIT_STATUS, which
@@ -207,7 +230,7 @@ public class TestWrites {
 
     FileHandle h = new FileHandle(1); // fake handle for "/dumpFilePath"
     COMMIT_STATUS ret;
-    WriteManager wm = new WriteManager(new IdUserGroup(config), config);
+    WriteManager wm = new WriteManager(new IdUserGroup(config), config, false);
     assertTrue(wm.addOpenFileStream(h, ctx));
     
     // Test inactive open file context