Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2014/08/21 07:22:16 UTC

svn commit: r1619293 [4/4] - in /hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/bin/ src/main/java/ src/main/java/org/apache/hadoop/fs/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/client/ sr...

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java Thu Aug 21 05:22:10 2014
@@ -47,15 +47,27 @@ import com.google.common.base.Preconditi
  * <br>
  * SYSTEM - extended system attributes: these are used by the HDFS
  * core and are not available through admin/user API.
+ * <br>
+ * RAW - extended system attributes: these are used for internal system
+ *   attributes that sometimes need to be exposed. Like SYSTEM namespace
+ *   attributes they are not visible to the user except when getXAttr/getXAttrs
+ *   is called on a file or directory in the /.reserved/raw HDFS directory
+ *   hierarchy. These attributes can only be accessed by the superuser.
+ * </br>
  */
 @InterfaceAudience.Private
 public class XAttrPermissionFilter {
   
-  static void checkPermissionForApi(FSPermissionChecker pc, XAttr xAttr) 
+  static void checkPermissionForApi(FSPermissionChecker pc, XAttr xAttr,
+      boolean isRawPath)
       throws AccessControlException {
+    final boolean isSuperUser = pc.isSuperUser();
     if (xAttr.getNameSpace() == XAttr.NameSpace.USER || 
-        (xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED && 
-        pc.isSuperUser())) {
+        (xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED && isSuperUser)) {
+      return;
+    }
+    if (xAttr.getNameSpace() == XAttr.NameSpace.RAW &&
+        isRawPath && isSuperUser) {
       return;
     }
     throw new AccessControlException("User doesn't have permission for xattr: "
@@ -63,30 +75,34 @@ public class XAttrPermissionFilter {
   }
 
   static void checkPermissionForApi(FSPermissionChecker pc,
-                                    List<XAttr> xAttrs) throws AccessControlException {
+      List<XAttr> xAttrs, boolean isRawPath) throws AccessControlException {
     Preconditions.checkArgument(xAttrs != null);
     if (xAttrs.isEmpty()) {
       return;
     }
 
     for (XAttr xAttr : xAttrs) {
-      checkPermissionForApi(pc, xAttr);
+      checkPermissionForApi(pc, xAttr, isRawPath);
     }
   }
 
   static List<XAttr> filterXAttrsForApi(FSPermissionChecker pc,
-      List<XAttr> xAttrs) {
+      List<XAttr> xAttrs, boolean isRawPath) {
     assert xAttrs != null : "xAttrs can not be null";
     if (xAttrs == null || xAttrs.isEmpty()) {
       return xAttrs;
     }
     
     List<XAttr> filteredXAttrs = Lists.newArrayListWithCapacity(xAttrs.size());
+    final boolean isSuperUser = pc.isSuperUser();
     for (XAttr xAttr : xAttrs) {
       if (xAttr.getNameSpace() == XAttr.NameSpace.USER) {
         filteredXAttrs.add(xAttr);
       } else if (xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED && 
-          pc.isSuperUser()) {
+          isSuperUser) {
+        filteredXAttrs.add(xAttr);
+      } else if (xAttr.getNameSpace() == XAttr.NameSpace.RAW &&
+          isSuperUser && isRawPath) {
         filteredXAttrs.add(xAttr);
       }
     }
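
For context, the class comment above adds the rule that the rest of this change keeps enforcing: raw.* attributes are readable only by the HDFS superuser, and only when the file is addressed through a /.reserved/raw path. A minimal client-side sketch of that behaviour, assuming a superuser caller and a hypothetical file that already carries a raw.* attribute (illustrative only, not part of the commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class RawXAttrSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // Hypothetical file that already carries a raw.* attribute.
        Path regular = new Path("/projects/data");
        Path raw = new Path("/.reserved/raw/projects/data");
        // On the regular path, filterXAttrsForApi drops the raw.* entries.
        System.out.println(fs.getXAttrs(regular).keySet());
        // On the /.reserved/raw path, a superuser also sees the raw.* entries;
        // for any other user, checkPermissionForApi rejects raw.* access.
        System.out.println(fs.getXAttrs(raw).keySet());
      }
    }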

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java Thu Aug 21 05:22:10 2014
@@ -257,7 +257,7 @@ public class JsonUtil {
           BlockStoragePolicy.ID_UNSPECIFIED;
     return new HdfsFileStatus(len, type == PathType.DIRECTORY, replication,
         blockSize, mTime, aTime, permission, owner, group, symlink,
-        DFSUtil.string2Bytes(localName), fileId, childrenNum, storagePolicy);
+        DFSUtil.string2Bytes(localName), fileId, childrenNum, null, storagePolicy);
   }
 
   /** Convert an ExtendedBlock to a Json map. */
@@ -537,7 +537,7 @@ public class JsonUtil {
         (Map<?, ?>)m.get("lastLocatedBlock"));
     final boolean isLastBlockComplete = (Boolean)m.get("isLastBlockComplete");
     return new LocatedBlocks(fileLength, isUnderConstruction, locatedBlocks,
-        lastLocatedBlock, isLastBlockComplete);
+        lastLocatedBlock, isLastBlockComplete, null);
   }
 
   /** Convert a ContentSummary to a Json string. */

Propchange: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1616894-1619277
  Merged /hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1594376-1619194

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto Thu Aug 21 05:22:10 2014
@@ -32,6 +32,7 @@ import "Security.proto";
 import "hdfs.proto";
 import "acl.proto";
 import "xattr.proto";
+import "encryption.proto";
 
 /**
  * The ClientNamenodeProtocol Service defines the interface between a client 
@@ -73,6 +74,7 @@ message CreateRequestProto {
   required bool createParent = 5;
   required uint32 replication = 6; // Short: Only 16 bits used
   required uint64 blockSize = 7;
+  repeated CipherSuite cipherSuites = 8;
 }
 
 message CreateResponseProto {
@@ -803,4 +805,10 @@ service ClientNamenodeProtocol {
       returns(RemoveXAttrResponseProto);
   rpc checkAccess(CheckAccessRequestProto)
       returns(CheckAccessResponseProto);
+  rpc createEncryptionZone(CreateEncryptionZoneRequestProto)
+      returns(CreateEncryptionZoneResponseProto);
+  rpc listEncryptionZones(ListEncryptionZonesRequestProto)
+      returns(ListEncryptionZonesResponseProto);
+  rpc getEZForPath(GetEZForPathRequestProto)
+      returns(GetEZForPathResponseProto);
 }
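
The three new RPCs back the client-side encryption zone operations. A hedged sketch of how they are reached from Java, assuming the HdfsAdmin API as it stands on this branch (exact signatures may differ) and a key named "mykey" that already exists in the configured KeyProvider:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.hdfs.client.HdfsAdmin;
    import org.apache.hadoop.hdfs.protocol.EncryptionZone;

    public class EncryptionZoneSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        HdfsAdmin admin = new HdfsAdmin(new URI("hdfs://localhost:8020"), conf);
        // createEncryptionZone: turn an existing (empty) directory into a zone.
        admin.createEncryptionZone(new Path("/secure"), "mykey");
        // getEZForPath: find the zone that contains a given path.
        System.out.println(admin.getEncryptionZoneForPath(new Path("/secure/file")));
        // listEncryptionZones: iterate all zones, fetched from the NameNode in
        // batches (see dfs.namenode.list.encryption.zones.num.responses below).
        RemoteIterator<EncryptionZone> it = admin.listEncryptionZones();
        while (it.hasNext()) {
          System.out.println(it.next().getPath());
        }
      }
    }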

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto Thu Aug 21 05:22:10 2014
@@ -201,6 +201,23 @@ message DataEncryptionKeyProto {
   optional string encryptionAlgorithm = 6;
 }
 
+/**
+ * Cipher suite.
+ */
+enum CipherSuite {
+    UNKNOWN = 1;
+    AES_CTR_NOPADDING = 2;
+}
+
+/**
+ * Encryption information for a file.
+ */
+message FileEncryptionInfoProto {
+  required CipherSuite suite = 1;
+  required bytes key = 2;
+  required bytes iv = 3;
+  required string ezKeyVersionName = 4;
+}
 
 /**
  * A set of file blocks and their locations.
@@ -211,9 +228,9 @@ message LocatedBlocksProto {
   required bool underConstruction = 3;
   optional LocatedBlockProto lastBlock = 4;
   required bool isLastBlockComplete = 5;
+  optional FileEncryptionInfoProto fileEncryptionInfo = 6;
 }
 
-
 /**
  * Status of a file, directory or symlink
  * Optionally includes a file's block locations if requested by client on the rpc call.
@@ -244,8 +261,12 @@ message HdfsFileStatusProto {
   // Optional field for fileId
   optional uint64 fileId = 13 [default = 0]; // default as an invalid id
   optional int32 childrenNum = 14 [default = -1];
-  optional uint32 storagePolicy = 15 [default = 0]; // block storage policy id
-}
+
+  // Optional field for file encryption
+  optional FileEncryptionInfoProto fileEncryptionInfo = 15;
+
+  optional uint32 storagePolicy = 16 [default = 0]; // block storage policy id
+} 
 
 /**
  * Checksum algorithms/types used in HDFS
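
For reference, the generated Java for the new message is used roughly as below. This is a hedged sketch: the field values are made up, and the HdfsProtos outer class name is assumed from the existing hdfs.proto conventions.

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuite;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto;

    public class FileEncryptionInfoSketch {
      public static void main(String[] args) {
        // All four fields are required: the cipher suite, the per-file key and
        // IV as raw bytes, and the name of the encryption zone key version.
        FileEncryptionInfoProto proto = FileEncryptionInfoProto.newBuilder()
            .setSuite(CipherSuite.AES_CTR_NOPADDING)
            .setKey(ByteString.copyFrom(new byte[16]))   // placeholder key
            .setIv(ByteString.copyFrom(new byte[16]))    // placeholder IV
            .setEzKeyVersionName("mykey@0")              // hypothetical name
            .build();
        System.out.println(proto);
      }
    }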

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto Thu Aug 21 05:22:10 2014
@@ -27,6 +27,7 @@ message XAttrProto {
     TRUSTED   = 1;
     SECURITY  = 2;
     SYSTEM    = 3;
+    RAW       = 4;
   }
   
   required XAttrNamespaceProto namespace = 1;

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Thu Aug 21 05:22:10 2014
@@ -1475,11 +1475,8 @@
   <value></value>
   <description>
     SaslPropertiesResolver used to resolve the QOP used for a connection to the
-    DataNode when reading or writing block data.  If not specified, the full set
-    of values specified in dfs.data.transfer.protection is used while
-    determining the QOP used for the connection. If a class is specified, then
-    the QOP values returned by the class will be used while determining the QOP
-    used for the connection.
+    DataNode when reading or writing block data. If not specified, the value of
+    hadoop.security.saslproperties.resolver.class is used as the default value.
   </description>
 </property>
 
@@ -2061,4 +2058,13 @@
     block layout (see HDFS-6482 for details on the layout).</description>
 </property>
 
+<property>
+  <name>dfs.namenode.list.encryption.zones.num.responses</name>
+  <value>100</value>
+  <description>When listing encryption zones, the maximum number of zones
+    that will be returned in a batch. Fetching the list incrementally in
+    batches improves namenode performance.
+  </description>
+</property>
+
 </configuration>
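
A quick, hedged illustration of overriding the new batching knob, for example in a test configuration (the value 50 is arbitrary):

    import org.apache.hadoop.conf.Configuration;

    public class ListZonesBatchSizeSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Smaller batches mean more listEncryptionZones round trips,
        // but less work per NameNode call.
        conf.setInt("dfs.namenode.list.encryption.zones.num.responses", 50);
      }
    }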

Propchange: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1594376-1619194
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1609845-1619277

Propchange: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1616894-1619277
  Merged /hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1594376-1619194

Propchange: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1594376-1619194
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1614232-1619277

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm Thu Aug 21 05:22:10 2014
@@ -30,7 +30,7 @@ Extended Attributes in HDFS
 
 ** {Namespaces and Permissions}
 
-  In HDFS, as in Linux, there are four valid namespaces: <<<user>>>, <<<trusted>>>, <<<system>>>, and <<<security>>>. Each of these namespaces have different access restrictions.
+  In HDFS, there are five valid namespaces: <<<user>>>, <<<trusted>>>, <<<system>>>, <<<security>>>, and <<<raw>>>. Each of these namespaces have different access restrictions.
 
   The <<<user>>> namespace is the namespace that will commonly be used by client applications. Access to extended attributes in the user namespace is controlled by the corresponding file permissions.
 
@@ -40,6 +40,8 @@ Extended Attributes in HDFS
 
   The <<<security>>> namespace is reserved for internal HDFS use. This namespace is not accessible through userspace methods. It is currently unused.
 
+ The <<<raw>>> namespace is reserved for internal system attributes that sometimes need to be exposed. Like <<<system>>> namespace attributes they are not visible to the user except when <<<getXAttr>>>/<<<getXAttrs>>> is called on a file or directory in the <<</.reserved/raw>>> HDFS directory hierarchy. These attributes can only be accessed by the superuser. An example of where <<<raw>>> namespace extended attributes are used is the <<<distcp>>> utility. Encryption zone meta data is stored in <<<raw.*>>> extended attributes, so as long as the administrator uses <<</.reserved/raw>>> pathnames in source and target, the encrypted files in the encryption zones are transparently copied.
+
 * {Interacting with extended attributes}
 
   The Hadoop shell has support for interacting with extended attributes via <<<hadoop fs -getfattr>>> and <<<hadoop fs -setfattr>>>. These commands are styled after the Linux {{{http://www.bestbits.at/acl/man/man1/getfattr.txt}getfattr(1)}} and {{{http://www.bestbits.at/acl/man/man1/setfattr.txt}setfattr(1)}} commands.
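
To make the raw namespace paragraph above concrete: when both the source and the destination of a copy are given as /.reserved/raw pathnames and the caller is the superuser, the raw.* attributes (and with them the encryption zone metadata) travel with the data. A hedged example of the distcp invocation that paragraph describes, with illustrative cluster names and flags:

-------------------------
     hadoop distcp -px hdfs://srcCluster:8020/.reserved/raw/projects \
         hdfs://dstCluster:8020/.reserved/raw/projects
-------------------------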

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm Thu Aug 21 05:22:10 2014
@@ -209,7 +209,7 @@ HDFS NFS Gateway
    [[2]] Start package included portmap (needs root privileges):
 
 -------------------------
-     hadoop portmap
+     hdfs portmap
   
      OR
 
@@ -224,7 +224,7 @@ HDFS NFS Gateway
      as long as the user has read access to the Kerberos keytab defined in "nfs.keytab.file".
 
 -------------------------
-     hadoop nfs3
+     hdfs nfs3
 
      OR
 

Propchange: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1594376-1619194
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1609845-1619277

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java Thu Aug 21 05:22:10 2014
@@ -29,7 +29,7 @@ import org.junit.Test;
  * Tests for <code>XAttr</code> objects.
  */
 public class TestXAttr {
-  private static XAttr XATTR, XATTR1, XATTR2, XATTR3, XATTR4;
+  private static XAttr XATTR, XATTR1, XATTR2, XATTR3, XATTR4, XATTR5;
   
   @BeforeClass
   public static void setUp() throws Exception {
@@ -58,6 +58,11 @@ public class TestXAttr {
       .setName("name")
       .setValue(value)
       .build();
+    XATTR5 = new XAttr.Builder()
+      .setNameSpace(XAttr.NameSpace.RAW)
+      .setName("name")
+      .setValue(value)
+      .build();
   }
   
   @Test
@@ -65,14 +70,17 @@ public class TestXAttr {
     assertNotSame(XATTR1, XATTR2);
     assertNotSame(XATTR2, XATTR3);
     assertNotSame(XATTR3, XATTR4);
+    assertNotSame(XATTR4, XATTR5);
     assertEquals(XATTR, XATTR1);
     assertEquals(XATTR1, XATTR1);
     assertEquals(XATTR2, XATTR2);
     assertEquals(XATTR3, XATTR3);
     assertEquals(XATTR4, XATTR4);
+    assertEquals(XATTR5, XATTR5);
     assertFalse(XATTR1.equals(XATTR2));
     assertFalse(XATTR2.equals(XATTR3));
     assertFalse(XATTR3.equals(XATTR4));
+    assertFalse(XATTR4.equals(XATTR5));
   }
   
   @Test
@@ -81,5 +89,6 @@ public class TestXAttr {
     assertFalse(XATTR1.hashCode() == XATTR2.hashCode());
     assertFalse(XATTR2.hashCode() == XATTR3.hashCode());
     assertFalse(XATTR3.hashCode() == XATTR4.hashCode());
+    assertFalse(XATTR4.hashCode() == XATTR5.hashCode());
   }
 }

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Thu Aug 21 05:22:10 2014
@@ -27,6 +27,7 @@ import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem.Statistics;
@@ -78,6 +79,7 @@ import org.junit.Assume;
 import java.io.*;
 import java.net.*;
 import java.nio.ByteBuffer;
+import java.security.NoSuchAlgorithmException;
 import java.security.PrivilegedExceptionAction;
 import java.util.*;
 import java.util.concurrent.TimeoutException;
@@ -86,6 +88,7 @@ import static org.apache.hadoop.hdfs.DFS
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 /** Utilities for HDFS tests */
 public class DFSTestUtil {
@@ -1305,6 +1308,71 @@ public class DFSTestUtil {
   }
 
   /**
+   * Verify that two files have the same contents.
+   *
+   * @param fs The file system containing the two files.
+   * @param p1 The path of the first file.
+   * @param p2 The path of the second file.
+   * @param len The length of the two files.
+   * @throws IOException
+   */
+  public static void verifyFilesEqual(FileSystem fs, Path p1, Path p2, int len)
+      throws IOException {
+    final FSDataInputStream in1 = fs.open(p1);
+    final FSDataInputStream in2 = fs.open(p2);
+    for (int i = 0; i < len; i++) {
+      assertEquals("Mismatch at byte " + i, in1.read(), in2.read());
+    }
+    in1.close();
+    in2.close();
+  }
+
+  /**
+   * Verify that two files have different contents.
+   *
+   * @param fs The file system containing the two files.
+   * @param p1 The path of the first file.
+   * @param p2 The path of the second file.
+   * @param len The length of the two files.
+   * @throws IOException
+   */
+  public static void verifyFilesNotEqual(FileSystem fs, Path p1, Path p2,
+      int len)
+          throws IOException {
+    final FSDataInputStream in1 = fs.open(p1);
+    final FSDataInputStream in2 = fs.open(p2);
+    try {
+      for (int i = 0; i < len; i++) {
+        if (in1.read() != in2.read()) {
+          return;
+        }
+      }
+      fail("files are equal, but should not be");
+    } finally {
+      in1.close();
+      in2.close();
+    }
+  }
+
+  /**
+   * Helper function to create a key in the Key Provider.
+   *
+   * @param keyName The name of the key to create
+   * @param cluster The cluster to create it in
+   * @param conf Configuration to use
+   */
+  public static void createKey(String keyName, MiniDFSCluster cluster,
+                                Configuration conf)
+          throws NoSuchAlgorithmException, IOException {
+    KeyProvider provider = cluster.getNameNode().getNamesystem().getProvider();
+    final KeyProvider.Options options = KeyProvider.options(conf);
+    options.setDescription(keyName);
+    options.setBitLength(128);
+    provider.createKey(keyName, options);
+    provider.flush();
+  }
+
+  /**
    * @return the node which is expected to run the recovery of the
    * given block, which is known to be under construction inside the
    * given NameNOde.
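
A hedged sketch of how the new helpers above fit together in a test. It assumes the MiniDFSCluster has been started with a KeyProvider configured (otherwise createKey cannot work), and the key and path names are made up:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSTestUtil;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class DFSTestUtilSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumes conf also points the NameNode at a KeyProvider.
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        try {
          FileSystem fs = cluster.getFileSystem();
          DFSTestUtil.createKey("test_key", cluster, conf);  // 128-bit key
          Path p1 = new Path("/f1");
          Path p2 = new Path("/f2");
          int len = 8192;
          DFSTestUtil.createFile(fs, p1, len, (short) 1, 0xBEEFL); // same seed,
          DFSTestUtil.createFile(fs, p2, len, (short) 1, 0xBEEFL); // same bytes
          DFSTestUtil.verifyFilesEqual(fs, p1, p2, len);
        } finally {
          cluster.shutdown();
        }
      }
    }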

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Thu Aug 21 05:22:10 2014
@@ -144,7 +144,7 @@ public class MiniDFSCluster {
     private int nameNodeHttpPort = 0;
     private final Configuration conf;
     private int numDataNodes = 1;
-    private StorageType storageType = StorageType.DEFAULT;
+    private StorageType[][] storageTypes = null;
     private boolean format = true;
     private boolean manageNameDfsDirs = true;
     private boolean manageNameDfsSharedDirs = true;
@@ -193,10 +193,26 @@ public class MiniDFSCluster {
     }
 
     /**
-     * Default: StorageType.DEFAULT
+     * Set the same storage type configuration for each datanode.
+     * If storageTypes is uninitialized or passed null then
+     * StorageType.DEFAULT is used.
      */
-    public Builder storageType(StorageType type) {
-      this.storageType = type;
+    public Builder storageTypes(StorageType[] types) {
+      assert types.length == DIRS_PER_DATANODE;
+      this.storageTypes = new StorageType[numDataNodes][types.length];
+      for (int i = 0; i < numDataNodes; ++i) {
+        this.storageTypes[i] = types;
+      }
+      return this;
+    }
+
+    /**
+     * Set custom storage type configuration for each datanode.
+     * If storageTypes is uninitialized or passed null then
+     * StorageType.DEFAULT is used.
+     */
+    public Builder storageTypes(StorageType[][] types) {
+      this.storageTypes = types;
       return this;
     }
 
@@ -369,7 +385,8 @@ public class MiniDFSCluster {
       builder.nnTopology = MiniDFSNNTopology.simpleSingleNN(
           builder.nameNodePort, builder.nameNodeHttpPort);
     }
-    
+    assert builder.storageTypes == null ||
+           builder.storageTypes.length == builder.numDataNodes;
     final int numNameNodes = builder.nnTopology.countNameNodes();
     LOG.info("starting cluster: numNameNodes=" + numNameNodes
         + ", numDataNodes=" + builder.numDataNodes);
@@ -377,7 +394,7 @@ public class MiniDFSCluster {
       
     initMiniDFSCluster(builder.conf,
                        builder.numDataNodes,
-                       builder.storageType,
+                       builder.storageTypes,
                        builder.format,
                        builder.manageNameDfsDirs,
                        builder.manageNameDfsSharedDirs,
@@ -477,8 +494,8 @@ public class MiniDFSCluster {
    * Servers will be started on free ports.
    * <p>
    * The caller must manage the creation of NameNode and DataNode directories
-   * and have already set {@link #DFS_NAMENODE_NAME_DIR_KEY} and 
-   * {@link #DFS_DATANODE_DATA_DIR_KEY} in the given conf.
+   * and have already set {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and
+   * {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} in the given conf.
    * 
    * @param conf the base configuration to use in starting the servers.  This
    *          will be modified as necessary.
@@ -554,8 +571,8 @@ public class MiniDFSCluster {
    * @param format if true, format the NameNode and DataNodes before starting 
    *          up
    * @param manageDfsDirs if true, the data directories for servers will be
-   *          created and {@link #DFS_NAMENODE_NAME_DIR_KEY} and 
-   *          {@link #DFS_DATANODE_DATA_DIR_KEY} will be set in 
+   *          created and {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and
+   *          {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be set in
    *          the conf
    * @param operation the operation with which to start the servers.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
@@ -586,8 +603,8 @@ public class MiniDFSCluster {
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param format if true, format the NameNode and DataNodes before starting up
    * @param manageDfsDirs if true, the data directories for servers will be
-   *          created and {@link #DFS_NAMENODE_NAME_DIR_KEY} and 
-   *          {@link #DFS_DATANODE_DATA_DIR_KEY} will be set in 
+   *          created and {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and
+   *          {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be set in
    *          the conf
    * @param operation the operation with which to start the servers.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
@@ -620,11 +637,11 @@ public class MiniDFSCluster {
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param format if true, format the NameNode and DataNodes before starting up
    * @param manageNameDfsDirs if true, the data directories for servers will be
-   *          created and {@link #DFS_NAMENODE_NAME_DIR_KEY} and 
-   *          {@link #DFS_DATANODE_DATA_DIR_KEY} will be set in 
+   *          created and {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and
+   *          {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be set in
    *          the conf
    * @param manageDataDfsDirs if true, the data directories for datanodes will
-   *          be created and {@link #DFS_DATANODE_DATA_DIR_KEY} 
+   *          be created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY}
    *          set to same in the conf
    * @param operation the operation with which to start the servers.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
@@ -643,7 +660,7 @@ public class MiniDFSCluster {
                         String[] racks, String hosts[],
                         long[] simulatedCapacities) throws IOException {
     this.nameNodes = new NameNodeInfo[1]; // Single namenode in the cluster
-    initMiniDFSCluster(conf, numDataNodes, StorageType.DEFAULT, format,
+    initMiniDFSCluster(conf, numDataNodes, null, format,
         manageNameDfsDirs, true, manageDataDfsDirs, manageDataDfsDirs, 
         operation, null, racks, hosts,
         simulatedCapacities, null, true, false,
@@ -652,7 +669,7 @@ public class MiniDFSCluster {
 
   private void initMiniDFSCluster(
       Configuration conf,
-      int numDataNodes, StorageType storageType, boolean format, boolean manageNameDfsDirs,
+      int numDataNodes, StorageType[][] storageTypes, boolean format, boolean manageNameDfsDirs,
       boolean manageNameDfsSharedDirs, boolean enableManagedDfsDirsRedundancy,
       boolean manageDataDfsDirs, StartupOption startOpt,
       StartupOption dnStartOpt, String[] racks,
@@ -725,7 +742,7 @@ public class MiniDFSCluster {
       }
 
       // Start the DataNodes
-      startDataNodes(conf, numDataNodes, storageType, manageDataDfsDirs,
+      startDataNodes(conf, numDataNodes, storageTypes, manageDataDfsDirs,
           dnStartOpt != null ? dnStartOpt : startOpt,
           racks, hosts, simulatedCapacities, setupHostsFile,
           checkDataNodeAddrConfig, checkDataNodeHostConfig, dnConfOverlays);
@@ -1100,15 +1117,18 @@ public class MiniDFSCluster {
     }
   }
 
-  String makeDataNodeDirs(int dnIndex, StorageType storageType) throws IOException {
+  String makeDataNodeDirs(int dnIndex, StorageType[] storageTypes) throws IOException {
     StringBuilder sb = new StringBuilder();
+    assert storageTypes == null || storageTypes.length == DIRS_PER_DATANODE;
     for (int j = 0; j < DIRS_PER_DATANODE; ++j) {
       File dir = getInstanceStorageDir(dnIndex, j);
       dir.mkdirs();
       if (!dir.isDirectory()) {
         throw new IOException("Mkdirs failed to create directory for DataNode " + dir);
       }
-      sb.append((j > 0 ? "," : "") + "[" + storageType + "]" + fileAsURI(dir));
+      sb.append((j > 0 ? "," : "") + "[" +
+          (storageTypes == null ? StorageType.DEFAULT : storageTypes[j]) +
+          "]" + fileAsURI(dir));
     }
     return sb.toString();
   }
@@ -1127,7 +1147,7 @@ public class MiniDFSCluster {
    *          will be modified as necessary.
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param manageDfsDirs if true, the data directories for DataNodes will be
-   *          created and {@link #DFS_DATANODE_DATA_DIR_KEY} will be set 
+   *          created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be set
    *          in the conf
    * @param operation the operation with which to start the DataNodes.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
@@ -1159,7 +1179,7 @@ public class MiniDFSCluster {
    *          will be modified as necessary.
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param manageDfsDirs if true, the data directories for DataNodes will be
-   *          created and {@link #DFS_DATANODE_DATA_DIR_KEY} will be 
+   *          created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be
    *          set in the conf
    * @param operation the operation with which to start the DataNodes.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
@@ -1175,21 +1195,17 @@ public class MiniDFSCluster {
                              String[] racks, String[] hosts,
                              long[] simulatedCapacities,
                              boolean setupHostsFile) throws IOException {
-    startDataNodes(conf, numDataNodes, StorageType.DEFAULT, manageDfsDirs, operation, racks, hosts,
+    startDataNodes(conf, numDataNodes, null, manageDfsDirs, operation, racks, hosts,
         simulatedCapacities, setupHostsFile, false, false, null);
   }
 
-  /**
-   * @see MiniDFSCluster#startDataNodes(Configuration, int, boolean, StartupOption,
-   * String[], String[], long[], boolean, boolean, boolean)
-   */
   public synchronized void startDataNodes(Configuration conf, int numDataNodes,
       boolean manageDfsDirs, StartupOption operation, 
       String[] racks, String[] hosts,
       long[] simulatedCapacities,
       boolean setupHostsFile,
       boolean checkDataNodeAddrConfig) throws IOException {
-    startDataNodes(conf, numDataNodes, StorageType.DEFAULT, manageDfsDirs, operation, racks, hosts,
+    startDataNodes(conf, numDataNodes, null, manageDfsDirs, operation, racks, hosts,
         simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, false, null);
   }
 
@@ -1207,7 +1223,7 @@ public class MiniDFSCluster {
    *          will be modified as necessary.
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param manageDfsDirs if true, the data directories for DataNodes will be
-   *          created and {@link #DFS_DATANODE_DATA_DIR_KEY} will be 
+   *          created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be
    *          set in the conf
    * @param operation the operation with which to start the DataNodes.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
@@ -1222,13 +1238,15 @@ public class MiniDFSCluster {
    * @throws IllegalStateException if NameNode has been shutdown
    */
   public synchronized void startDataNodes(Configuration conf, int numDataNodes,
-      StorageType storageType, boolean manageDfsDirs, StartupOption operation,
+      StorageType[][] storageTypes, boolean manageDfsDirs, StartupOption operation,
       String[] racks, String[] hosts,
       long[] simulatedCapacities,
       boolean setupHostsFile,
       boolean checkDataNodeAddrConfig,
       boolean checkDataNodeHostConfig,
       Configuration[] dnConfOverlays) throws IOException {
+    assert storageTypes == null || storageTypes.length == numDataNodes;
+
     if (operation == StartupOption.RECOVER) {
       return;
     }
@@ -1289,7 +1307,7 @@ public class MiniDFSCluster {
       // Set up datanode address
       setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
       if (manageDfsDirs) {
-        String dirs = makeDataNodeDirs(i, storageType);
+        String dirs = makeDataNodeDirs(i, storageTypes == null ? null : storageTypes[i]);
         dnConf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
         conf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
       }
@@ -2173,7 +2191,7 @@ public class MiniDFSCluster {
   }
 
   /**
-   * Multiple-NameNode version of {@link #injectBlocks(Iterable[])}.
+   * Multiple-NameNode version of injectBlocks.
    */
   public void injectBlocks(int nameNodeIndex, int dataNodeIndex,
       Iterable<Block> blocksToInject) throws IOException {
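
A hedged usage sketch for the new builder option above: storage types are now given per datanode and per storage directory (each inner array needs one entry per data dir, two by default), and passing null uses StorageType.DEFAULT. Class locations are assumed as of this branch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.StorageType;

    public class StorageTypesSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Two datanodes with two storage dirs each: the first mixes DISK and
        // SSD, the second is DISK only.
        StorageType[][] types = {
            { StorageType.DISK, StorageType.SSD },
            { StorageType.DISK, StorageType.DISK }
        };
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(2)
            .storageTypes(types)
            .build();
        try {
          // ... exercise the cluster ...
        } finally {
          cluster.shutdown();
        }
      }
    }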

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java Thu Aug 21 05:22:10 2014
@@ -50,12 +50,14 @@ public class MiniDFSClusterWithNodeGroup
   }
 
   public synchronized void startDataNodes(Configuration conf, int numDataNodes,
-      StorageType storageType, boolean manageDfsDirs, StartupOption operation,
+      StorageType[][] storageTypes, boolean manageDfsDirs, StartupOption operation,
       String[] racks, String[] nodeGroups, String[] hosts,
       long[] simulatedCapacities,
       boolean setupHostsFile,
       boolean checkDataNodeAddrConfig,
       boolean checkDataNodeHostConfig) throws IOException {
+    assert storageTypes == null || storageTypes.length == numDataNodes;
+
     if (operation == StartupOption.RECOVER) {
       return;
     }
@@ -112,7 +114,7 @@ public class MiniDFSClusterWithNodeGroup
       // Set up datanode address
       setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
       if (manageDfsDirs) {
-        String dirs = makeDataNodeDirs(i, storageType);
+        String dirs = makeDataNodeDirs(i, storageTypes == null ? null : storageTypes[i]);
         dnConf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dirs);
         conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dirs);
       }
@@ -190,7 +192,7 @@ public class MiniDFSClusterWithNodeGroup
       String[] racks, String[] nodeGroups, String[] hosts,
       long[] simulatedCapacities,
       boolean setupHostsFile) throws IOException {
-    startDataNodes(conf, numDataNodes, StorageType.DEFAULT, manageDfsDirs, operation, racks, nodeGroups,
+    startDataNodes(conf, numDataNodes, null, manageDfsDirs, operation, racks, nodeGroups,
         hosts, simulatedCapacities, setupHostsFile, false, false);
   }
 
@@ -205,14 +207,14 @@ public class MiniDFSClusterWithNodeGroup
   // This is for initialize from parent class.
   @Override
   public synchronized void startDataNodes(Configuration conf, int numDataNodes, 
-      StorageType storageType, boolean manageDfsDirs, StartupOption operation,
+      StorageType[][] storageTypes, boolean manageDfsDirs, StartupOption operation,
       String[] racks, String[] hosts,
       long[] simulatedCapacities,
       boolean setupHostsFile,
       boolean checkDataNodeAddrConfig,
       boolean checkDataNodeHostConfig,
       Configuration[] dnConfOverlays) throws IOException {
-    startDataNodes(conf, numDataNodes, storageType, manageDfsDirs, operation, racks,
+    startDataNodes(conf, numDataNodes, storageTypes, manageDfsDirs, operation, racks,
         NODE_GROUPS, hosts, simulatedCapacities, setupHostsFile, 
         checkDataNodeAddrConfig, checkDataNodeHostConfig);
   }

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java Thu Aug 21 05:22:10 2014
@@ -24,6 +24,7 @@ import static org.junit.Assert.assertTru
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyBoolean;
+import static org.mockito.Matchers.anyList;
 import static org.mockito.Matchers.anyLong;
 import static org.mockito.Matchers.anyObject;
 import static org.mockito.Matchers.anyShort;
@@ -51,6 +52,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.CipherSuite;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -253,16 +255,16 @@ public class TestDFSClientRetries {
     Mockito.doReturn(
             new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
                 (short) 777), "owner", "group", new byte[0], new byte[0],
-                1010, 0, (byte) 0)).when(mockNN).getFileInfo(anyString());
+                1010, 0, null, (byte) 0)).when(mockNN).getFileInfo(anyString());
     
     Mockito.doReturn(
             new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
                 (short) 777), "owner", "group", new byte[0], new byte[0],
-                1010, 0, (byte) 0))
+                1010, 0, null, (byte) 0))
         .when(mockNN)
         .create(anyString(), (FsPermission) anyObject(), anyString(),
             (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
-            anyShort(), anyLong());
+            anyShort(), anyLong(), (List<CipherSuite>) anyList());
 
     final DFSClient client = new DFSClient(null, mockNN, conf, null);
     OutputStream os = client.create("testfile", true);
@@ -494,7 +496,8 @@ public class TestDFSClientRetries {
       List<LocatedBlock> badBlocks = new ArrayList<LocatedBlock>();
       badBlocks.add(badLocatedBlock);
       return new LocatedBlocks(goodBlockList.getFileLength(), false,
-                               badBlocks, null, true);
+                               badBlocks, null, true,
+                               null);
     }
   }
   

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java Thu Aug 21 05:22:10 2014
@@ -27,6 +27,9 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.Options.Rename;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.junit.Test;
 
@@ -125,4 +128,45 @@ public class TestDFSRename {
       if (cluster != null) {cluster.shutdown();}
     }
   }
+  
+  /**
+   * Check the blocks of dst file are cleaned after rename with overwrite
+   */
+  @Test(timeout = 120000)
+  public void testRenameWithOverwrite() throws Exception {
+    final short replFactor = 2;
+    final long blockSize = 512;
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
+        numDataNodes(replFactor).build();
+    DistributedFileSystem dfs = cluster.getFileSystem();
+    try {
+      
+      long fileLen = blockSize*3;
+      String src = "/foo/src";
+      String dst = "/foo/dst";
+      Path srcPath = new Path(src);
+      Path dstPath = new Path(dst);
+      
+      DFSTestUtil.createFile(dfs, srcPath, fileLen, replFactor, 1);
+      DFSTestUtil.createFile(dfs, dstPath, fileLen, replFactor, 1);
+      
+      LocatedBlocks lbs = NameNodeAdapter.getBlockLocations(
+          cluster.getNameNode(), dst, 0, fileLen);
+      BlockManager bm = NameNodeAdapter.getNamesystem(cluster.getNameNode()).
+          getBlockManager();
+      assertTrue(bm.getStoredBlock(lbs.getLocatedBlocks().get(0).getBlock().
+          getLocalBlock()) != null);
+      dfs.rename(srcPath, dstPath, Rename.OVERWRITE);
+      assertTrue(bm.getStoredBlock(lbs.getLocatedBlocks().get(0).getBlock().
+          getLocalBlock()) == null);
+    } finally {
+      if (dfs != null) {
+        dfs.close();
+      }
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java Thu Aug 21 05:22:10 2014
@@ -77,6 +77,13 @@ public class TestDFSShell {
 
   static final String TEST_ROOT_DIR = PathUtils.getTestDirName(TestDFSShell.class);
 
+  private static final String RAW_A1 = "raw.a1";
+  private static final String TRUSTED_A1 = "trusted.a1";
+  private static final String USER_A1 = "user.a1";
+  private static final byte[] RAW_A1_VALUE = new byte[]{0x32, 0x32, 0x32};
+  private static final byte[] TRUSTED_A1_VALUE = new byte[]{0x31, 0x31, 0x31};
+  private static final byte[] USER_A1_VALUE = new byte[]{0x31, 0x32, 0x33};
+
   static Path writeFile(FileSystem fs, Path f) throws IOException {
     DataOutputStream out = fs.create(f);
     out.writeBytes("dhruba: " + f);
@@ -1664,8 +1671,8 @@ public class TestDFSShell {
       final String group = status.getGroup();
       final FsPermission perm = status.getPermission();
       
-      fs.setXAttr(src, "user.a1", new byte[]{0x31, 0x32, 0x33});
-      fs.setXAttr(src, "trusted.a1", new byte[]{0x31, 0x31, 0x31});
+      fs.setXAttr(src, USER_A1, USER_A1_VALUE);
+      fs.setXAttr(src, TRUSTED_A1, TRUSTED_A1_VALUE);
       
       shell = new FsShell(conf);
       
@@ -1722,8 +1729,8 @@ public class TestDFSShell {
       assertTrue(perm.equals(targetPerm));
       xattrs = fs.getXAttrs(target3);
       assertEquals(xattrs.size(), 2);
-      assertArrayEquals(new byte[]{0x31, 0x32, 0x33}, xattrs.get("user.a1"));
-      assertArrayEquals(new byte[]{0x31, 0x31, 0x31}, xattrs.get("trusted.a1"));
+      assertArrayEquals(USER_A1_VALUE, xattrs.get(USER_A1));
+      assertArrayEquals(TRUSTED_A1_VALUE, xattrs.get(TRUSTED_A1));
       acls = fs.getAclStatus(target3).getEntries();
       assertTrue(acls.isEmpty());
       assertFalse(targetPerm.getAclBit());
@@ -1780,6 +1787,160 @@ public class TestDFSShell {
     }
   }
 
+  @Test (timeout = 120000)
+  public void testCopyCommandsWithRawXAttrs() throws Exception {
+    final Configuration conf = new Configuration();
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
+      numDataNodes(1).format(true).build();
+    FsShell shell = null;
+    FileSystem fs = null;
+    final String testdir = "/tmp/TestDFSShell-testCopyCommandsWithRawXAttrs-"
+      + counter.getAndIncrement();
+    final Path hdfsTestDir = new Path(testdir);
+    final Path rawHdfsTestDir = new Path("/.reserved/raw" + testdir);
+    try {
+      fs = cluster.getFileSystem();
+      fs.mkdirs(hdfsTestDir);
+      final Path src = new Path(hdfsTestDir, "srcfile");
+      final String rawSrcBase = "/.reserved/raw" + testdir;
+      final Path rawSrc = new Path(rawSrcBase, "srcfile");
+      fs.create(src).close();
+
+      final Path srcDir = new Path(hdfsTestDir, "srcdir");
+      final Path rawSrcDir = new Path("/.reserved/raw" + testdir, "srcdir");
+      fs.mkdirs(srcDir);
+      final Path srcDirFile = new Path(srcDir, "srcfile");
+      final Path rawSrcDirFile =
+              new Path("/.reserved/raw" + srcDirFile);
+      fs.create(srcDirFile).close();
+
+      final Path[] paths = { rawSrc, rawSrcDir, rawSrcDirFile };
+      final String[] xattrNames = { USER_A1, RAW_A1 };
+      final byte[][] xattrVals = { USER_A1_VALUE, RAW_A1_VALUE };
+
+      for (int i = 0; i < paths.length; i++) {
+        for (int j = 0; j < xattrNames.length; j++) {
+          fs.setXAttr(paths[i], xattrNames[j], xattrVals[j]);
+        }
+      }
+
+      shell = new FsShell(conf);
+
+      /* Check that a file as the source path works ok. */
+      doTestCopyCommandsWithRawXAttrs(shell, fs, src, hdfsTestDir, false);
+      doTestCopyCommandsWithRawXAttrs(shell, fs, rawSrc, hdfsTestDir, false);
+      doTestCopyCommandsWithRawXAttrs(shell, fs, src, rawHdfsTestDir, false);
+      doTestCopyCommandsWithRawXAttrs(shell, fs, rawSrc, rawHdfsTestDir, true);
+
+      /* Use a relative /.reserved/raw path. */
+      final Path savedWd = fs.getWorkingDirectory();
+      try {
+        fs.setWorkingDirectory(new Path(rawSrcBase));
+        final Path relRawSrc = new Path("../srcfile");
+        final Path relRawHdfsTestDir = new Path("..");
+        doTestCopyCommandsWithRawXAttrs(shell, fs, relRawSrc, relRawHdfsTestDir,
+                true);
+      } finally {
+        fs.setWorkingDirectory(savedWd);
+      }
+
+      /* Check that a directory as the source path works ok. */
+      doTestCopyCommandsWithRawXAttrs(shell, fs, srcDir, hdfsTestDir, false);
+      doTestCopyCommandsWithRawXAttrs(shell, fs, rawSrcDir, hdfsTestDir, false);
+      doTestCopyCommandsWithRawXAttrs(shell, fs, srcDir, rawHdfsTestDir, false);
+      doTestCopyCommandsWithRawXAttrs(shell, fs, rawSrcDir, rawHdfsTestDir,
+        true);
+
+      /* Use relative in an absolute path. */
+      final String relRawSrcDir = "./.reserved/../.reserved/raw/../raw" +
+          testdir + "/srcdir";
+      final String relRawDstDir = "./.reserved/../.reserved/raw/../raw" +
+          testdir;
+      doTestCopyCommandsWithRawXAttrs(shell, fs, new Path(relRawSrcDir),
+          new Path(relRawDstDir), true);
+    } finally {
+      if (null != shell) {
+        shell.close();
+      }
+
+      if (null != fs) {
+        fs.delete(hdfsTestDir, true);
+        fs.close();
+      }
+      cluster.shutdown();
+    }
+  }
+
+  private void doTestCopyCommandsWithRawXAttrs(FsShell shell, FileSystem fs,
+      Path src, Path hdfsTestDir, boolean expectRaw) throws Exception {
+    Path target;
+    boolean srcIsRaw;
+    if (src.isAbsolute()) {
+      srcIsRaw = src.toString().contains("/.reserved/raw");
+    } else {
+      srcIsRaw = new Path(fs.getWorkingDirectory(), src).
+          toString().contains("/.reserved/raw");
+    }
+    final boolean destIsRaw = hdfsTestDir.toString().contains("/.reserved/raw");
+    final boolean srcDestMismatch = srcIsRaw ^ destIsRaw;
+
+    // -p (possibly preserve raw if src & dst are both /.r/r */
+    if (srcDestMismatch) {
+      doCopyAndTest(shell, hdfsTestDir, src, "-p", ERROR);
+    } else {
+      target = doCopyAndTest(shell, hdfsTestDir, src, "-p", SUCCESS);
+      checkXAttrs(fs, target, expectRaw, false);
+    }
+
+    // -px (possibly preserve raw, always preserve non-raw xattrs. */
+    if (srcDestMismatch) {
+      doCopyAndTest(shell, hdfsTestDir, src, "-px", ERROR);
+    } else {
+      target = doCopyAndTest(shell, hdfsTestDir, src, "-px", SUCCESS);
+      checkXAttrs(fs, target, expectRaw, true);
+    }
+
+    // no args (possibly preserve raw, never preserve non-raw xattrs. */
+    if (srcDestMismatch) {
+      doCopyAndTest(shell, hdfsTestDir, src, null, ERROR);
+    } else {
+      target = doCopyAndTest(shell, hdfsTestDir, src, null, SUCCESS);
+      checkXAttrs(fs, target, expectRaw, false);
+    }
+  }
+
+  private Path doCopyAndTest(FsShell shell, Path dest, Path src,
+      String cpArgs, int expectedExitCode) throws Exception {
+    final Path target = new Path(dest, "targetfile" +
+        counter.getAndIncrement());
+    final String[] argv = cpArgs == null ?
+        new String[] { "-cp",         src.toUri().toString(),
+            target.toUri().toString() } :
+        new String[] { "-cp", cpArgs, src.toUri().toString(),
+            target.toUri().toString() };
+    final int ret = ToolRunner.run(shell, argv);
+    assertEquals("cp -p is not working", expectedExitCode, ret);
+    return target;
+  }
+
+  private void checkXAttrs(FileSystem fs, Path target, boolean expectRaw,
+      boolean expectVanillaXAttrs) throws Exception {
+    final Map<String, byte[]> xattrs = fs.getXAttrs(target);
+    int expectedCount = 0;
+    if (expectRaw) {
+      assertArrayEquals("raw.a1 has incorrect value",
+          RAW_A1_VALUE, xattrs.get(RAW_A1));
+      expectedCount++;
+    }
+    if (expectVanillaXAttrs) {
+      assertArrayEquals("user.a1 has incorrect value",
+          USER_A1_VALUE, xattrs.get(USER_A1));
+      expectedCount++;
+    }
+    assertEquals("xattrs size mismatch", expectedCount, xattrs.size());
+  }
+
   // verify cp -ptopxa option will preserve directory attributes.
   @Test (timeout = 120000)
   public void testCopyCommandsToDirectoryWithPreserveOption()
@@ -1825,8 +1986,8 @@ public class TestDFSShell {
       final String group = status.getGroup();
       final FsPermission perm = status.getPermission();
 
-      fs.setXAttr(srcDir, "user.a1", new byte[]{0x31, 0x32, 0x33});
-      fs.setXAttr(srcDir, "trusted.a1", new byte[]{0x31, 0x31, 0x31});
+      fs.setXAttr(srcDir, USER_A1, USER_A1_VALUE);
+      fs.setXAttr(srcDir, TRUSTED_A1, TRUSTED_A1_VALUE);
 
       shell = new FsShell(conf);
 
@@ -1883,8 +2044,8 @@ public class TestDFSShell {
       assertTrue(perm.equals(targetPerm));
       xattrs = fs.getXAttrs(targetDir3);
       assertEquals(xattrs.size(), 2);
-      assertArrayEquals(new byte[]{0x31, 0x32, 0x33}, xattrs.get("user.a1"));
-      assertArrayEquals(new byte[]{0x31, 0x31, 0x31}, xattrs.get("trusted.a1"));
+      assertArrayEquals(USER_A1_VALUE, xattrs.get(USER_A1));
+      assertArrayEquals(TRUSTED_A1_VALUE, xattrs.get(TRUSTED_A1));
       acls = fs.getAclStatus(targetDir3).getEntries();
       assertTrue(acls.isEmpty());
       assertFalse(targetPerm.getAclBit());

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java Thu Aug 21 05:22:10 2014
@@ -104,7 +104,7 @@ public class TestDFSUtil {
     LocatedBlock l2 = new LocatedBlock(b2, ds, 0, true);
 
     List<LocatedBlock> ls = Arrays.asList(l1, l2);
-    LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true);
+    LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true, null);
 
     BlockLocation[] bs = DFSUtil.locatedBlocks2Locations(lbs);
 

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java Thu Aug 21 05:22:10 2014
@@ -38,7 +38,6 @@ import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Random;
-import java.util.concurrent.CancellationException;
 
 import org.apache.commons.lang.ArrayUtils;
 import org.apache.commons.logging.impl.Log4JLogger;

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java Thu Aug 21 05:22:10 2014
@@ -1142,7 +1142,7 @@ public class TestFileCreation {
           try {
             nnrpc.create(pathStr, new FsPermission((short)0755), "client",
                 new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
-                true, (short)1, 128*1024*1024L);
+                true, (short)1, 128*1024*1024L, null);
             fail("Should have thrown exception when creating '"
                 + pathStr + "'" + " by " + method);
           } catch (InvalidPathException ipe) {

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java Thu Aug 21 05:22:10 2014
@@ -18,6 +18,9 @@
 package org.apache.hadoop.hdfs;
 
 import static org.mockito.Matchers.anyBoolean;
+import static org.mockito.Matchers.anyList;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Matchers.anyShort;
 import static org.mockito.Matchers.anyLong;
 import static org.mockito.Matchers.anyObject;
 import static org.mockito.Matchers.anyShort;
@@ -29,10 +32,12 @@ import static org.mockito.Mockito.spy;
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
+import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.CipherSuite;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -338,16 +343,16 @@ public class TestLease {
     Mockito.doReturn(
         new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
             (short) 777), "owner", "group", new byte[0], new byte[0],
-            1010, 0, (byte) 0)).when(mcp).getFileInfo(anyString());
+            1010, 0, null, (byte) 0)).when(mcp).getFileInfo(anyString());
     Mockito
         .doReturn(
             new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
                 (short) 777), "owner", "group", new byte[0], new byte[0],
-                1010, 0, (byte) 0))
+                1010, 0, null, (byte) 0))
         .when(mcp)
         .create(anyString(), (FsPermission) anyObject(), anyString(),
             (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
-            anyShort(), anyLong());
+            anyShort(), anyLong(), (List<CipherSuite>) anyList());
 
     final Configuration conf = new Configuration();
     final DFSClient c1 = createDFSClientAs(ugi[0], conf);

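Throughout these test updates the NameNode create() call gains one trailing argument, which every caller here fills with null; the Mockito stub above types it as a List<CipherSuite>, so it is presumably the cipher-suite request coming in from the encryption work (an assumption -- the hunks themselves do not name it). A minimal sketch of a direct call against the widened signature, with nnRpc standing in for a NameNode RPC proxy such as cluster.getNameNodeRpc():

    // Sketch only: `nnRpc` is an assumed handle to the NameNode RPC interface.
    HdfsFileStatus stat = nnRpc.create("/tmp/f", FsPermission.getFileDefault(), "clientName",
        new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
        true /* createParent */, (short) 3, 128 * 1024 * 1024L,
        null /* no cipher suites requested, i.e. the pre-existing behaviour */);
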
Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java Thu Aug 21 05:22:10 2014
@@ -58,7 +58,7 @@ public class TestStorageReport {
     conf = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(REPL_FACTOR)
-        .storageType(storageType)
+        .storageTypes(new StorageType[] { storageType, storageType } )
         .build();
     fs = cluster.getFileSystem();
     bpid = cluster.getNamesystem().getBlockPoolId();

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java Thu Aug 21 05:22:10 2014
@@ -69,6 +69,7 @@ public class FSXAttrBaseTest {
   protected static Configuration conf;
   private static int pathCount = 0;
   protected static Path path;
+  protected static Path rawPath;
   
   // XAttrs
   protected static final String name1 = "user.a1";
@@ -78,6 +79,8 @@ public class FSXAttrBaseTest {
   protected static final byte[] value2 = {0x37, 0x38, 0x39};
   protected static final String name3 = "user.a3";
   protected static final String name4 = "user.a4";
+  protected static final String raw1 = "raw.a1";
+  protected static final String raw2 = "raw.a2";
 
   protected FileSystem fs;
 
@@ -107,6 +110,7 @@ public class FSXAttrBaseTest {
   public void setUp() throws Exception {
     pathCount += 1;
     path = new Path("/p" + pathCount);
+    rawPath = new Path("/.reserved/raw/p" + pathCount);
     initFileSystem();
   }
 
@@ -395,7 +399,8 @@ public class FSXAttrBaseTest {
       Assert.fail("expected IOException");
     } catch (Exception e) {
       GenericTestUtils.assertExceptionContains
-          ("An XAttr name must be prefixed with user/trusted/security/system, " +
+          ("An XAttr name must be prefixed with " +
+           "user/trusted/security/system/raw, " +
            "followed by a '.'",
           e);
     }
@@ -582,7 +587,7 @@ public class FSXAttrBaseTest {
 
     /* Unknown namespace should throw an exception. */
     final String expectedExceptionString = "An XAttr name must be prefixed " +
-        "with user/trusted/security/system, followed by a '.'";
+        "with user/trusted/security/system/raw, followed by a '.'";
     try {
       fs.removeXAttr(path, "wackynamespace.foo");
       Assert.fail("expected IOException");
@@ -918,6 +923,176 @@ public class FSXAttrBaseTest {
     fsAsDiana.removeXAttr(path, name2);
   }
   
+  @Test(timeout = 120000)
+  public void testRawXAttrs() throws Exception {
+    final UserGroupInformation user = UserGroupInformation.
+      createUserForTesting("user", new String[] {"mygroup"});
+
+    FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
+    fs.setXAttr(rawPath, raw1, value1, EnumSet.of(XAttrSetFlag.CREATE,
+        XAttrSetFlag.REPLACE));
+
+    {
+      // getXAttr
+      final byte[] value = fs.getXAttr(rawPath, raw1);
+      Assert.assertArrayEquals(value1, value);
+    }
+
+    {
+      // getXAttrs
+      final Map<String, byte[]> xattrs = fs.getXAttrs(rawPath);
+      Assert.assertEquals(xattrs.size(), 1);
+      Assert.assertArrayEquals(value1, xattrs.get(raw1));
+      fs.removeXAttr(rawPath, raw1);
+    }
+
+    {
+      // replace and re-get
+      fs.setXAttr(rawPath, raw1, value1, EnumSet.of(XAttrSetFlag.CREATE));
+      fs.setXAttr(rawPath, raw1, newValue1, EnumSet.of(XAttrSetFlag.CREATE,
+          XAttrSetFlag.REPLACE));
+
+      final Map<String,byte[]> xattrs = fs.getXAttrs(rawPath);
+      Assert.assertEquals(xattrs.size(), 1);
+      Assert.assertArrayEquals(newValue1, xattrs.get(raw1));
+
+      fs.removeXAttr(rawPath, raw1);
+    }
+
+    {
+      // listXAttrs on rawPath ensuring raw.* xattrs are returned
+      fs.setXAttr(rawPath, raw1, value1, EnumSet.of(XAttrSetFlag.CREATE));
+      fs.setXAttr(rawPath, raw2, value2, EnumSet.of(XAttrSetFlag.CREATE));
+
+      final List<String> xattrNames = fs.listXAttrs(rawPath);
+      assertTrue(xattrNames.contains(raw1));
+      assertTrue(xattrNames.contains(raw2));
+      assertTrue(xattrNames.size() == 2);
+      fs.removeXAttr(rawPath, raw1);
+      fs.removeXAttr(rawPath, raw2);
+    }
+
+    {
+      // listXAttrs on non-rawPath ensuring no raw.* xattrs returned
+      fs.setXAttr(rawPath, raw1, value1, EnumSet.of(XAttrSetFlag.CREATE));
+      fs.setXAttr(rawPath, raw2, value2, EnumSet.of(XAttrSetFlag.CREATE));
+
+      final List<String> xattrNames = fs.listXAttrs(path);
+      assertTrue(xattrNames.size() == 0);
+      fs.removeXAttr(rawPath, raw1);
+      fs.removeXAttr(rawPath, raw2);
+    }
+
+    {
+      /*
+       * Test non-root user operations in the "raw.*" namespace.
+       */
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final FileSystem userFs = dfsCluster.getFileSystem();
+          // Test that non-root can not set xattrs in the "raw.*" namespace
+          try {
+            // non-raw path
+            userFs.setXAttr(path, raw1, value1);
+            fail("setXAttr should have thrown");
+          } catch (AccessControlException e) {
+            // ignore
+          }
+
+          try {
+            // raw path
+            userFs.setXAttr(rawPath, raw1, value1);
+            fail("setXAttr should have thrown");
+          } catch (AccessControlException e) {
+            // ignore
+          }
+
+          // Test that non-root can not do getXAttrs in the "raw.*" namespace
+          try {
+            // raw path
+            userFs.getXAttrs(rawPath);
+            fail("getXAttrs should have thrown");
+          } catch (AccessControlException e) {
+            // ignore
+          }
+
+          try {
+            // non-raw path
+            userFs.getXAttrs(path);
+            fail("getXAttrs should have thrown");
+          } catch (AccessControlException e) {
+            // ignore
+          }
+
+          // Test that non-root can not do getXAttr in the "raw.*" namespace
+          try {
+            // raw path
+            userFs.getXAttr(rawPath, raw1);
+            fail("getXAttr should have thrown");
+          } catch (AccessControlException e) {
+            // ignore
+          }
+
+          try {
+            // non-raw path
+            userFs.getXAttr(path, raw1);
+            fail("getXAttr should have thrown");
+          } catch (AccessControlException e) {
+            // ignore
+          }
+          return null;
+        }
+      });
+    }
+
+    {
+      /*
+       * Test that a non-root user cannot read a raw.* xattr that already exists.
+       */
+      fs.setXAttr(rawPath, raw1, value1);
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            try {
+              // raw path
+              userFs.getXAttr(rawPath, raw1);
+              fail("getXAttr should have thrown");
+            } catch (AccessControlException e) {
+              // ignore
+            }
+
+            try {
+              // non-raw path
+              userFs.getXAttr(path, raw1);
+              fail("getXAttr should have thrown");
+            } catch (AccessControlException e) {
+              // ignore
+            }
+
+            /*
+             * Test that only root can see raw.* xattrs returned from listXAttr
+             * and non-root can't do listXAttrs on /.reserved/raw.
+             */
+            // non-raw path
+            final List<String> xattrNames = userFs.listXAttrs(path);
+            assertTrue(xattrNames.size() == 0);
+            try {
+              // raw path
+              userFs.listXAttrs(rawPath);
+              fail("listXAttrs on raw path should have thrown");
+            } catch (AccessControlException e) {
+              // ignore
+            }
+
+            return null;
+          }
+      });
+      fs.removeXAttr(rawPath, raw1);
+    }
+  }
+
   /**
    * Creates a FileSystem for the super-user.
    *

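The denied-operation checks in testRawXAttrs above all follow the same try/fail/catch shape. A small helper along these lines (purely an illustrative refactoring under the same assumptions as the test -- JUnit 4, java.util.concurrent.Callable, and the userFs/rawPath/raw1/value1 names from the patch) would express the identical expectation:

    // Hypothetical helper, shown only to illustrate the expected-failure pattern used above.
    private static void expectAccessControlException(String op, Callable<?> action)
        throws Exception {
      try {
        action.call();
        Assert.fail(op + " should have thrown AccessControlException");
      } catch (AccessControlException e) {
        // expected: raw.* xattrs are only accessible to the superuser via /.reserved/raw
      }
    }

    // e.g. inside the doAs(...) blocks:
    expectAccessControlException("setXAttr", new Callable<Void>() {
      @Override
      public Void call() throws Exception {
        userFs.setXAttr(rawPath, raw1, value1);
        return null;
      }
    });
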
Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Thu Aug 21 05:22:10 2014
@@ -587,7 +587,8 @@ public class NNThroughputBenchmark imple
       // dummyActionNoSynch(fileIdx);
       nameNodeProto.create(fileNames[daemonId][inputIdx], FsPermission.getDefault(),
                       clientName, new EnumSetWritable<CreateFlag>(EnumSet
-              .of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true, replication, BLOCK_SIZE);
+              .of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true, 
+          replication, BLOCK_SIZE, null);
       long end = Time.now();
       for(boolean written = !closeUponCreate; !written; 
         written = nameNodeProto.complete(fileNames[daemonId][inputIdx],
@@ -1133,7 +1134,7 @@ public class NNThroughputBenchmark imple
         String fileName = nameGenerator.getNextFileName("ThroughputBench");
         nameNodeProto.create(fileName, FsPermission.getDefault(), clientName,
             new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true, replication,
-            BLOCK_SIZE);
+            BLOCK_SIZE, null);
         ExtendedBlock lastBlock = addBlocks(fileName, clientName);
         nameNodeProto.complete(fileName, clientName, lastBlock, INodeId.GRANDFATHER_INODE_ID);
       }

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java Thu Aug 21 05:22:10 2014
@@ -128,7 +128,7 @@ public class TestAddBlockRetry {
     nn.create(src, FsPermission.getFileDefault(),
         "clientName",
         new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
-        true, (short)3, 1024);
+        true, (short)3, 1024, null);
 
     // start first addBlock()
     LOG.info("Starting first addBlock for " + src);
@@ -155,7 +155,7 @@ public class TestAddBlockRetry {
     // create file
     nameNodeRpc.create(src, FsPermission.getFileDefault(), "clientName",
         new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
-        (short) 3, 1024);
+        (short) 3, 1024, null);
     // start first addBlock()
     LOG.info("Starting first addBlock for " + src);
     LocatedBlock lb1 = nameNodeRpc.addBlock(src, "clientName", null, null,

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java Thu Aug 21 05:22:10 2014
@@ -191,14 +191,19 @@ public class TestFSDirectory {
     existingXAttrs.add(xAttr1);
     existingXAttrs.add(xAttr2);
     
-    // Adding a system namespace xAttr, isn't affected by inode xAttrs limit.
-    XAttr newXAttr = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.SYSTEM).
+    // Adding system and raw namespace xAttrs aren't affected by inode
+    // xAttrs limit.
+    XAttr newSystemXAttr = (new XAttr.Builder()).
+        setNameSpace(XAttr.NameSpace.SYSTEM).setName("a3").
+        setValue(new byte[]{0x33, 0x33, 0x33}).build();
+    XAttr newRawXAttr = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.RAW).
         setName("a3").setValue(new byte[]{0x33, 0x33, 0x33}).build();
-    List<XAttr> newXAttrs = Lists.newArrayListWithCapacity(1);
-    newXAttrs.add(newXAttr);
+    List<XAttr> newXAttrs = Lists.newArrayListWithCapacity(2);
+    newXAttrs.add(newSystemXAttr);
+    newXAttrs.add(newRawXAttr);
     List<XAttr> xAttrs = fsdir.setINodeXAttrs(existingXAttrs, newXAttrs,
         EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
-    assertEquals(xAttrs.size(), 3);
+    assertEquals(xAttrs.size(), 4);
     
     // Adding a trusted namespace xAttr, is affected by inode xAttrs limit.
     XAttr newXAttr1 = (new XAttr.Builder()).setNameSpace(

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Thu Aug 21 05:22:10 2014
@@ -1019,7 +1019,7 @@ public class TestFsck {
 
     HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication,
         blockSize, modTime, accessTime, perms, owner, group, symlink, path,
-        fileId, numChildren, storagePolicy);
+        fileId, numChildren, null, storagePolicy);
     Result res = new Result(conf);
 
     try {

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java Thu Aug 21 05:22:10 2014
@@ -209,19 +209,20 @@ public class TestNamenodeRetryCache {
     // Two retried calls succeed
     newCall();
     HdfsFileStatus status = namesystem.startFile(src, perm, "holder",
-        "clientmachine", EnumSet.of(CreateFlag.CREATE), true, (short) 1, BlockSize);
+        "clientmachine", EnumSet.of(CreateFlag.CREATE), true, (short) 1, 
+        BlockSize, null);
     Assert.assertEquals(status, namesystem.startFile(src, perm, 
         "holder", "clientmachine", EnumSet.of(CreateFlag.CREATE), 
-        true, (short) 1, BlockSize));
+        true, (short) 1, BlockSize, null));
     Assert.assertEquals(status, namesystem.startFile(src, perm, 
         "holder", "clientmachine", EnumSet.of(CreateFlag.CREATE), 
-        true, (short) 1, BlockSize));
+        true, (short) 1, BlockSize, null));
     
     // A non-retried call fails
     newCall();
     try {
       namesystem.startFile(src, perm, "holder", "clientmachine",
-          EnumSet.of(CreateFlag.CREATE), true, (short) 1, BlockSize);
+          EnumSet.of(CreateFlag.CREATE), true, (short) 1, BlockSize, null);
       Assert.fail("testCreate - expected exception is not thrown");
     } catch (IOException e) {
       // expected

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java Thu Aug 21 05:22:10 2014
@@ -395,7 +395,7 @@ public class TestRetryCacheWithHA {
       this.status = client.getNamenode().create(fileName,
           FsPermission.getFileDefault(), client.getClientName(),
           new EnumSetWritable<CreateFlag>(createFlag), false, DataNodes,
-          BlockSize);
+          BlockSize, null);
     }
 
     @Override

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java Thu Aug 21 05:22:10 2014
@@ -64,7 +64,7 @@ public class TestJsonUtil {
     final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
         now, now + 10, new FsPermission((short) 0644), "user", "group",
         DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
-        INodeId.GRANDFATHER_INODE_ID, 0, (byte) 0);
+        INodeId.GRANDFATHER_INODE_ID, 0, null, (byte) 0);
     final FileStatus fstatus = toFileStatus(status, parent);
     System.out.println("status  = " + status);
     System.out.println("fstatus = " + fstatus);

Modified: hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml?rev=1619293&r1=1619292&r2=1619293&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml (original)
+++ hadoop/common/branches/HDFS-6584/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml Thu Aug 21 05:22:10 2014
@@ -64,7 +64,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>name must be prefixed with user/trusted/security/system, followed by a '.'</expected-output>
+          <expected-output>name must be prefixed with user/trusted/security/system/raw, followed by a '.'</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -126,6 +126,42 @@
     </test>
     
     <test>
+      <description>setfattr : Attempt to add a raw namespace xattr on a non-/.reserved/raw path (rejected)</description>
+      <test-commands>
+          <command>-fs NAMENODE -touchz /file1</command>
+          <command>-fs NAMENODE -setfattr -n raw.a1 -v 123456 /file1</command>
+      </test-commands>
+      <cleanup-commands>
+          <command>-fs NAMENODE -rm /file1</command>
+      </cleanup-commands>
+      <comparators>
+          <comparator>
+              <type>SubstringComparator</type>
+              <expected-output>setfattr: User doesn't have permission for xattr: raw.a1</expected-output>
+          </comparator>
+      </comparators>
+
+    </test>
+
+    <test>
+        <description>setfattr : Add an xattr of raw namespace through /.reserved/raw</description>
+        <test-commands>
+            <command>-fs NAMENODE -touchz /file1</command>
+            <command>-fs NAMENODE -setfattr -n raw.a1 -v 123456 /.reserved/raw/file1</command>
+            <command>-fs NAMENODE -getfattr -n raw.a1 /.reserved/raw/file1</command>
+        </test-commands>
+        <cleanup-commands>
+            <command>-fs NAMENODE -rm /file1</command>
+        </cleanup-commands>
+        <comparators>
+            <comparator>
+                <type>SubstringComparator</type>
+                <expected-output>raw.a1="123456"</expected-output>
+            </comparator>
+        </comparators>
+    </test>
+
+    <test>
       <description>setfattr : Add an xattr, and encode is text</description>
       <test-commands>
         <command>-fs NAMENODE -touchz /file1</command>
@@ -256,6 +292,26 @@
         </comparator>
       </comparators>
     </test>
+
+    <test>
+        <description>setfattr : Remove an xattr of raw namespace</description>
+        <test-commands>
+            <command>-fs NAMENODE -touchz /file1</command>
+            <command>-fs NAMENODE -setfattr -n raw.a1 -v 123456 /.reserved/raw/file1</command>
+            <command>-fs NAMENODE -setfattr -n raw.a2 -v 123456 /.reserved/raw/file1</command>
+            <command>-fs NAMENODE -setfattr -x raw.a2 /.reserved/raw/file1</command>
+            <command>-fs NAMENODE -getfattr -d /.reserved/raw/file1</command>
+        </test-commands>
+        <cleanup-commands>
+            <command>-fs NAMENODE -rm /file1</command>
+        </cleanup-commands>
+        <comparators>
+            <comparator>
+                <type>SubstringComparator</type>
+                <expected-output># file: /.reserved/raw/file1#LF#raw.a1="123456"#LF#</expected-output>
+            </comparator>
+        </comparators>
+    </test>
     
     <test>
       <description>getfattr : Get an xattr</description>