Posted to hdfs-commits@hadoop.apache.org by wa...@apache.org on 2014/08/05 04:31:00 UTC

svn commit: r1615844 [4/5] - in /hadoop/common/branches/fs-encryption/hadoop-hdfs-project: hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/ hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/ hadoop-hdfs/ hadoop-hdfs/src/main/j...

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1615844&r1=1615843&r2=1615844&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java Tue Aug  5 02:30:54 2014
@@ -55,6 +55,7 @@ import org.apache.hadoop.fs.XAttrSetFlag
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.ha.HAServiceStatus;
 import org.apache.hadoop.ha.HealthCheckFailedException;
@@ -1067,7 +1068,7 @@ class NameNodeRpcServer implements Namen
       // for the same node and storage, so the value returned by the last
      // call of this loop is the final updated value for noStaleStorages.
       //
-      noStaleStorages = bm.processReport(nodeReg, r.getStorage(), poolId, blocks);
+      noStaleStorages = bm.processReport(nodeReg, r.getStorage(), blocks);
       metrics.incrStorageBlockReportOps();
     }
 
@@ -1103,7 +1104,7 @@ class NameNodeRpcServer implements Namen
           +" blocks.");
     }
     for(StorageReceivedDeletedBlocks r : receivedAndDeletedBlocks) {
-      namesystem.processIncrementalBlockReport(nodeReg, poolId, r);
+      namesystem.processIncrementalBlockReport(nodeReg, r);
     }
   }
 
@@ -1458,5 +1459,10 @@ class NameNodeRpcServer implements Namen
   public void removeXAttr(String src, XAttr xAttr) throws IOException {
     namesystem.removeXAttr(src, xAttr);
   }
+
+  @Override
+  public void checkAccess(String path, FsAction mode) throws IOException {
+    namesystem.checkAccess(path, mode);
+  }
 }
 

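For context, the checkAccess() override above is the NameNode-side endpoint behind the new FileSystem#access client API (wired through the client protocol and WebHDFS in the hunks below, and exercised by the TestDFSPermission cases further down). A minimal client-side sketch, assuming a reachable cluster; the path is hypothetical:

  import java.io.IOException;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.permission.FsAction;
  import org.apache.hadoop.security.AccessControlException;

  public class CheckAccessExample {
    public static void main(String[] args) throws IOException {
      FileSystem fs = FileSystem.get(new Configuration());
      Path p = new Path("/user/alice/data");  // hypothetical path
      try {
        // Throws AccessControlException when the caller lacks the permission,
        // FileNotFoundException when the path does not exist.
        fs.access(p, FsAction.READ_WRITE);
        System.out.println("read/write permitted");
      } catch (AccessControlException e) {
        System.out.println("access denied: " + e.getMessage());
      }
    }
  }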
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java?rev=1615844&r1=1615843&r2=1615844&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java Tue Aug  5 02:30:54 2014
@@ -57,6 +57,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -112,6 +113,7 @@ import org.apache.hadoop.hdfs.web.resour
 import org.apache.hadoop.hdfs.web.resources.XAttrNameParam;
 import org.apache.hadoop.hdfs.web.resources.XAttrSetFlagParam;
 import org.apache.hadoop.hdfs.web.resources.XAttrValueParam;
+import org.apache.hadoop.hdfs.web.resources.FsActionParam;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.Server;
@@ -755,10 +757,12 @@ public class NamenodeWebHdfsMethods {
       @QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT) 
           final XAttrEncodingParam xattrEncoding,
       @QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
-          final ExcludeDatanodesParam excludeDatanodes          
+          final ExcludeDatanodesParam excludeDatanodes,
+      @QueryParam(FsActionParam.NAME) @DefaultValue(FsActionParam.DEFAULT)
+          final FsActionParam fsAction
       ) throws IOException, InterruptedException {
     return get(ugi, delegation, username, doAsUser, ROOT, op, offset, length,
-        renewer, bufferSize, xattrNames, xattrEncoding, excludeDatanodes);
+        renewer, bufferSize, xattrNames, xattrEncoding, excludeDatanodes, fsAction);
   }
 
   /** Handle HTTP GET request. */
@@ -789,11 +793,13 @@ public class NamenodeWebHdfsMethods {
       @QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT) 
           final XAttrEncodingParam xattrEncoding,
       @QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
-          final ExcludeDatanodesParam excludeDatanodes
+          final ExcludeDatanodesParam excludeDatanodes,
+      @QueryParam(FsActionParam.NAME) @DefaultValue(FsActionParam.DEFAULT)
+          final FsActionParam fsAction
       ) throws IOException, InterruptedException {
 
     init(ugi, delegation, username, doAsUser, path, op, offset, length,
-        renewer, bufferSize, xattrEncoding, excludeDatanodes);
+        renewer, bufferSize, xattrEncoding, excludeDatanodes, fsAction);
 
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
@@ -801,7 +807,7 @@ public class NamenodeWebHdfsMethods {
         try {
           return get(ugi, delegation, username, doAsUser,
               path.getAbsolutePath(), op, offset, length, renewer, bufferSize,
-              xattrNames, xattrEncoding, excludeDatanodes);
+              xattrNames, xattrEncoding, excludeDatanodes, fsAction);
         } finally {
           reset();
         }
@@ -822,7 +828,8 @@ public class NamenodeWebHdfsMethods {
       final BufferSizeParam bufferSize,
       final List<XAttrNameParam> xattrNames,
       final XAttrEncodingParam xattrEncoding,
-      final ExcludeDatanodesParam excludeDatanodes
+      final ExcludeDatanodesParam excludeDatanodes,
+      final FsActionParam fsAction
       ) throws IOException, URISyntaxException {
     final NameNode namenode = (NameNode)context.getAttribute("name.node");
     final NamenodeProtocols np = getRPCServer(namenode);
@@ -919,6 +926,10 @@ public class NamenodeWebHdfsMethods {
       final String js = JsonUtil.toJsonString(xAttrs);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
+    case CHECKACCESS: {
+      np.checkAccess(fullpath, FsAction.getFsAction(fsAction.getValue()));
+      return Response.ok().build();
+    }
     default:
       throw new UnsupportedOperationException(op + " is not supported");
     }

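On the wire, the new CHECKACCESS case corresponds to a plain GET with the fsaction query parameter added above; a sketch, using the placeholder conventions of the WebHDFS documentation below (r-x is one valid fsaction value):

  curl -i "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=CHECKACCESS&fsaction=r-x"

A 200 response with Content-Length 0 means the check passed; a failed check should surface as the usual WebHDFS JSON error response.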
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java?rev=1615844&r1=1615843&r2=1615844&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java Tue Aug  5 02:30:54 2014
@@ -17,10 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.util.Arrays;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.Block;
 
 /**
@@ -39,12 +38,15 @@ public class BlocksWithLocations {
     final Block block;
     final String[] datanodeUuids;
     final String[] storageIDs;
+    final StorageType[] storageTypes;
     
     /** constructor */
-    public BlockWithLocations(Block block, String[] datanodeUuids, String[] storageIDs) {
+    public BlockWithLocations(Block block, String[] datanodeUuids,
+        String[] storageIDs, StorageType[] storageTypes) {
       this.block = block;
       this.datanodeUuids = datanodeUuids;
       this.storageIDs = storageIDs;
+      this.storageTypes = storageTypes;
     }
     
     /** get the block */
@@ -61,7 +63,12 @@ public class BlocksWithLocations {
     public String[] getStorageIDs() {
       return storageIDs;
     }
-    
+
+    /** @return the storage types */
+    public StorageType[] getStorageTypes() {
+      return storageTypes;
+    }
+
     @Override
     public String toString() {
       final StringBuilder b = new StringBuilder();
@@ -70,12 +77,18 @@ public class BlocksWithLocations {
         return b.append("[]").toString();
       }
       
-      b.append(storageIDs[0]).append('@').append(datanodeUuids[0]);
+      appendString(0, b.append("["));
       for(int i = 1; i < datanodeUuids.length; i++) {
-        b.append(", ").append(storageIDs[i]).append("@").append(datanodeUuids[i]);
+        appendString(i, b.append(","));
       }
       return b.append("]").toString();
     }
+    
+    private StringBuilder appendString(int i, StringBuilder b) {
+      return b.append("[").append(storageTypes[i]).append("]")
+              .append(storageIDs[i])
+              .append("@").append(datanodeUuids[i]);
+    }
   }
 
   private final BlockWithLocations[] blocks;

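For illustration, with the new appendString() the locations portion of toString() for a block with two replicas renders along these lines (storage IDs, UUIDs, and types invented):

  [[DISK]storage-1@dn-uuid-1,[SSD]storage-2@dn-uuid-2]

i.e. each replica entry is now prefixed with its storage type in square brackets.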
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java?rev=1615844&r1=1615843&r2=1615844&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java Tue Aug  5 02:30:54 2014
@@ -29,8 +29,8 @@ import org.xml.sax.ContentHandler;
 import org.xml.sax.SAXException;
 import org.xml.sax.helpers.AttributesImpl;
 
-import com.sun.org.apache.xml.internal.serialize.OutputFormat;
-import com.sun.org.apache.xml.internal.serialize.XMLSerializer;
+import org.apache.xml.serialize.OutputFormat;
+import org.apache.xml.serialize.XMLSerializer;
 
 /**
  * An XmlEditsVisitor walks over an EditLog structure and writes out

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java?rev=1615844&r1=1615843&r2=1615844&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java Tue Aug  5 02:30:54 2014
@@ -37,7 +37,7 @@ import com.google.common.base.Preconditi
 public class EnumCounters<E extends Enum<E>> {
   /** The class of the enum. */
   private final Class<E> enumClass;
-  /** The counter array, counters[i] corresponds to the enumConstants[i]. */
+  /** An array of longs corresponding to the enum type. */
   private final long[] counters;
 
   /**
@@ -75,6 +75,13 @@ public class EnumCounters<E extends Enum
     }
   }
 
+  /** Reset all counters to zero. */
+  public final void reset() {
+    for(int i = 0; i < counters.length; i++) {
+      this.counters[i] = 0L;
+    }
+  }
+
   /** Add the given value to counter e. */
   public final void add(final E e, final long value) {
     counters[e.ordinal()] += value;

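A short sketch of the new reset() next to the existing counter API; the Op enum and the counts are invented for illustration, and the constructor/get() signatures are assumed from the class shown above:

  import org.apache.hadoop.hdfs.util.EnumCounters;

  public class EnumCountersExample {
    enum Op { READ, WRITE }  // hypothetical enum

    public static void main(String[] args) {
      EnumCounters<Op> c = new EnumCounters<Op>(Op.class);
      c.add(Op.READ, 5L);                  // READ=5, WRITE=0
      c.reset();                           // zero out every counter
      System.out.println(c.get(Op.READ));  // prints 0
    }
  }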
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1615844&r1=1615843&r2=1615844&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Tue Aug  5 02:30:54 2014
@@ -54,6 +54,7 @@ import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -1357,6 +1358,12 @@ public class WebHdfsFileSystem extends F
   }
 
   @Override
+  public void access(final Path path, final FsAction mode) throws IOException {
+    final HttpOpParam.Op op = GetOpParam.Op.CHECKACCESS;
+    new FsPathRunner(op, path, new FsActionParam(mode)).run();
+  }
+
+  @Override
   public ContentSummary getContentSummary(final Path p) throws IOException {
     statistics.incrementReadOps(1);
 

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java?rev=1615844&r1=1615843&r2=1615844&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java Tue Aug  5 02:30:54 2014
@@ -39,7 +39,9 @@ public class GetOpParam extends HttpOpPa
     GETXATTRS(false, HttpURLConnection.HTTP_OK),
     LISTXATTRS(false, HttpURLConnection.HTTP_OK),
 
-    NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
+    NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED),
+
+    CHECKACCESS(false, HttpURLConnection.HTTP_OK);
 
     final boolean redirect;
     final int expectedHttpResponseCode;

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto?rev=1615844&r1=1615843&r2=1615844&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto Tue Aug  5 02:30:54 2014
@@ -656,6 +656,14 @@ message DeleteSnapshotRequestProto {
 message DeleteSnapshotResponseProto { // void response
 }
 
+message CheckAccessRequestProto {
+  required string path = 1;
+  required AclEntryProto.FsActionProto mode = 2;
+}
+
+message CheckAccessResponseProto { // void response
+}
+
 service ClientNamenodeProtocol {
   rpc getBlockLocations(GetBlockLocationsRequestProto)
       returns(GetBlockLocationsResponseProto);
@@ -785,6 +793,8 @@ service ClientNamenodeProtocol {
       returns(ListXAttrsResponseProto);
   rpc removeXAttr(RemoveXAttrRequestProto)
       returns(RemoveXAttrResponseProto);
+  rpc checkAccess(CheckAccessRequestProto)
+      returns(CheckAccessResponseProto);
   rpc createEncryptionZone(CreateEncryptionZoneRequestProto)
       returns(CreateEncryptionZoneResponseProto);
   rpc listEncryptionZones(ListEncryptionZonesRequestProto)

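Assuming the usual protoc-generated Java names (CheckAccessRequestProto from ClientNamenodeProtocol.proto, FsActionProto nested under AclEntryProto), a request would be built roughly as follows; this is a sketch only, not confirmed by this diff:

  // Generated class names assumed; path is hypothetical.
  CheckAccessRequestProto req = CheckAccessRequestProto.newBuilder()
      .setPath("/user/alice")
      .setMode(AclEntryProto.FsActionProto.READ)  // check read access
      .build();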
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto?rev=1615844&r1=1615843&r2=1615844&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto Tue Aug  5 02:30:54 2014
@@ -424,6 +424,7 @@ message BlockWithLocationsProto {
   required BlockProto block = 1;   // Block
   repeated string datanodeUuids = 2; // Datanodes with replicas of the block
   repeated string storageUuids = 3;  // Storages with replicas of the block
+  repeated StorageTypeProto storageTypes = 4;
 }
 
 /**

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1615844&r1=1615843&r2=1615844&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Tue Aug  5 02:30:54 2014
@@ -2053,6 +2053,14 @@
 </property>
 
 <property>
+  <name>dfs.datanode.block.id.layout.upgrade.threads</name>
+  <value>12</value>
+  <description>The number of threads to use when creating hard links from
+    current to previous blocks during upgrade of a DataNode to block ID-based
+    block layout (see HDFS-6482 for details on the layout).</description>
+</property>
+
+<property>
   <name>dfs.namenode.list.encryption.zones.num.responses</name>
   <value>100</value>
   <description>When listing encryption zones, the maximum number of zones

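Deployments that want a different degree of hard-link parallelism during the layout upgrade can override the new default in hdfs-site.xml; the value below is an arbitrary example:

  <property>
    <name>dfs.datanode.block.id.layout.upgrade.threads</name>
    <value>6</value>
  </property>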
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm?rev=1615844&r1=1615843&r2=1615844&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm Tue Aug  5 02:30:54 2014
@@ -47,18 +47,21 @@ HDFS NFS Gateway
   The NFS gateway uses a proxy user to proxy all users accessing the NFS mounts.
   In non-secure mode, the user running the gateway is the proxy user, while in secure mode the
   user in the Kerberos keytab is the proxy user. Suppose the proxy user is 'nfsserver'
-   and users belonging to the groups 'nfs-users1'
-   and 'nfs-users2' use the NFS mounts, then in core-site.xml of the NameNode, the following
+   and users belonging to the groups 'users-group1'
+   and 'users-group2' use the NFS mounts, then in core-site.xml of the NameNode, the following
   two properties must be set, and only the NameNode needs a restart after the configuration change
    (NOTE: replace the string 'nfsserver' with the proxy user name in your cluster):
 
 ----
 <property>
   <name>hadoop.proxyuser.nfsserver.groups</name>
-  <value>nfs-users1,nfs-users2</value>
+  <value>root,users-group1,users-group2</value>
   <description>
-         The 'nfsserver' user is allowed to proxy all members of the 'nfs-users1' and 
-         'nfs-users2' groups. Set this to '*' to allow nfsserver user to proxy any group.
+         The 'nfsserver' user is allowed to proxy all members of the 'users-group1' and 
+         'users-group2' groups. Note that in most cases you will need to include the
+         group "root" because the user "root" (which usually belonges to "root" group) will
+         generally be the user that initially executes the mount on the NFS client system. 
+         Set this to '*' to allow nfsserver user to proxy any group.
   </description>
 </property>
 ----

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm?rev=1615844&r1=1615843&r2=1615844&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm Tue Aug  5 02:30:54 2014
@@ -82,6 +82,9 @@ WebHDFS REST API
     * {{{List all XAttrs}<<<LISTXATTRS>>>}}
         (see  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.listXAttrs)
 
+    * {{{Check access}<<<CHECKACCESS>>>}}
+        (see  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.access)
+
   * HTTP PUT
 
     * {{{Create and Write to a File}<<<CREATE>>>}}
@@ -927,6 +930,28 @@ Transfer-Encoding: chunked
   {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getAclStatus
 
 
+** {Check access}
+
+  * Submit an HTTP GET request.
+  
++---------------------------------
+curl -i "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=CHECKACCESS
+                              &fsaction=<FSACTION>"
++---------------------------------
+
+  The client receives a response with zero content length:
+
++---------------------------------
+HTTP/1.1 200 OK
+Content-Length: 0
++---------------------------------
+
+  []
+
+  See also:
+  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.access
+    
+
 * {Extended Attributes(XAttrs) Operations}
 
 ** {Set XAttr}
@@ -2166,6 +2191,25 @@ var tokenProperties =
   {{Proxy Users}}
 
 
+** {Fs Action}
+
+*----------------+-------------------------------------------------------------------+
+|| Name          | <<<fsaction>>> |
+*----------------+-------------------------------------------------------------------+
|| Description   | The file system permission (read/write/execute) to check for |
+*----------------+-------------------------------------------------------------------+
+|| Type          | String |
+*----------------+-------------------------------------------------------------------+
+|| Default Value | null (an invalid value) |
+*----------------+-------------------------------------------------------------------+
+|| Valid Values  | Strings matching regex pattern \"[rwx-]\{3\}\" |
+*----------------+-------------------------------------------------------------------+
+|| Syntax        | \"[rwx-]\{3\}\" |
+*----------------+-------------------------------------------------------------------+
+
+  See also:
+  {{{Check access}<<<CHECKACCESS>>>}}
+
 ** {Group}
 
 *----------------+-------------------------------------------------------------------+

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java?rev=1615844&r1=1615843&r2=1615844&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java Tue Aug  5 02:30:54 2014
@@ -47,7 +47,6 @@ import org.mockito.Mockito;
 public class TestGenericRefresh {
   private static MiniDFSCluster cluster;
   private static Configuration config;
-  private static final int NNPort = 54222;
 
   private static RefreshHandler firstHandler;
   private static RefreshHandler secondHandler;
@@ -57,8 +56,8 @@ public class TestGenericRefresh {
     config = new Configuration();
     config.set("hadoop.security.authorization", "true");
 
-    FileSystem.setDefaultUri(config, "hdfs://localhost:" + NNPort);
-    cluster = new MiniDFSCluster.Builder(config).nameNodePort(NNPort).build();
+    FileSystem.setDefaultUri(config, "hdfs://localhost:0");
+    cluster = new MiniDFSCluster.Builder(config).build();
     cluster.waitActive();
   }
 
@@ -103,7 +102,8 @@ public class TestGenericRefresh {
   @Test
   public void testInvalidIdentifier() throws Exception {
     DFSAdmin admin = new DFSAdmin(config);
-    String [] args = new String[]{"-refresh", "localhost:" + NNPort, "unregisteredIdentity"};
+    String [] args = new String[]{"-refresh", "localhost:" + 
+        cluster.getNameNodePort(), "unregisteredIdentity"};
     int exitCode = admin.run(args);
     assertEquals("DFSAdmin should fail due to no handler registered", -1, exitCode);
   }
@@ -111,7 +111,8 @@ public class TestGenericRefresh {
   @Test
   public void testValidIdentifier() throws Exception {
     DFSAdmin admin = new DFSAdmin(config);
-    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "firstHandler"};
+    String[] args = new String[]{"-refresh",
+        "localhost:" + cluster.getNameNodePort(), "firstHandler"};
     int exitCode = admin.run(args);
     assertEquals("DFSAdmin should succeed", 0, exitCode);
 
@@ -124,11 +125,13 @@ public class TestGenericRefresh {
   @Test
   public void testVariableArgs() throws Exception {
     DFSAdmin admin = new DFSAdmin(config);
-    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "secondHandler", "one"};
+    String[] args = new String[]{"-refresh", "localhost:" +
+        cluster.getNameNodePort(), "secondHandler", "one"};
     int exitCode = admin.run(args);
     assertEquals("DFSAdmin should return 2", 2, exitCode);
 
-    exitCode = admin.run(new String[]{"-refresh", "localhost:" + NNPort, "secondHandler", "one", "two"});
+    exitCode = admin.run(new String[]{"-refresh", "localhost:" +
+        cluster.getNameNodePort(), "secondHandler", "one", "two"});
     assertEquals("DFSAdmin should now return 3", 3, exitCode);
 
     Mockito.verify(secondHandler).handleRefresh("secondHandler", new String[]{"one"});
@@ -141,7 +144,8 @@ public class TestGenericRefresh {
 
     // And now this should fail
     DFSAdmin admin = new DFSAdmin(config);
-    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "firstHandler"};
+    String[] args = new String[]{"-refresh", "localhost:" +
+        cluster.getNameNodePort(), "firstHandler"};
     int exitCode = admin.run(args);
     assertEquals("DFSAdmin should return -1", -1, exitCode);
   }
@@ -161,7 +165,8 @@ public class TestGenericRefresh {
 
     // this should trigger both
     DFSAdmin admin = new DFSAdmin(config);
-    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "sharedId", "one"};
+    String[] args = new String[]{"-refresh", "localhost:" +
+        cluster.getNameNodePort(), "sharedId", "one"};
     int exitCode = admin.run(args);
     assertEquals(-1, exitCode); // -1 because one of the responses is unregistered
 
@@ -189,7 +194,8 @@ public class TestGenericRefresh {
 
     // We refresh both
     DFSAdmin admin = new DFSAdmin(config);
-    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "shared"};
+    String[] args = new String[]{"-refresh", "localhost:" +
+        cluster.getNameNodePort(), "shared"};
     int exitCode = admin.run(args);
     assertEquals(-1, exitCode); // We get -1 because of our logic for melding non-zero return codes
 
@@ -215,7 +221,8 @@ public class TestGenericRefresh {
     RefreshRegistry.defaultRegistry().register("exceptional", otherExceptionalHandler);
 
     DFSAdmin admin = new DFSAdmin(config);
-    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "exceptional"};
+    String[] args = new String[]{"-refresh", "localhost:" +
+        cluster.getNameNodePort(), "exceptional"};
     int exitCode = admin.run(args);
     assertEquals(-1, exitCode); // Exceptions result in a -1
 

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java?rev=1615844&r1=1615843&r2=1615844&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java Tue Aug  5 02:30:54 2014
@@ -24,6 +24,8 @@ import static org.junit.Assert.assertTru
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
+import java.net.BindException;
+import java.util.Random;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
 
@@ -42,24 +44,42 @@ public class TestRefreshCallQueue {
   private FileSystem fs;
   static int mockQueueConstructions;
   static int mockQueuePuts;
-  private static final int NNPort = 54222;
-  private static String CALLQUEUE_CONFIG_KEY = "ipc." + NNPort + ".callqueue.impl";
+  private String callQueueConfigKey = "";
+  private final Random rand = new Random();
 
   @Before
   public void setUp() throws Exception {
     // We want to count additional events, so we reset here
     mockQueueConstructions = 0;
     mockQueuePuts = 0;
+    int portRetries = 5;
+    int nnPort;
 
-    config = new Configuration();
-    config.setClass(CALLQUEUE_CONFIG_KEY,
-        MockCallQueue.class, BlockingQueue.class);
-    config.set("hadoop.security.authorization", "true");
-
-    FileSystem.setDefaultUri(config, "hdfs://localhost:" + NNPort);
-    fs = FileSystem.get(config);
-    cluster = new MiniDFSCluster.Builder(config).nameNodePort(NNPort).build();
-    cluster.waitActive();
+    for (; portRetries > 0; --portRetries) {
+      // Pick a random port in the range [30000,60000).
+      nnPort = 30000 + rand.nextInt(30000);  
+      config = new Configuration();
+      callQueueConfigKey = "ipc." + nnPort + ".callqueue.impl";
+      config.setClass(callQueueConfigKey,
+          MockCallQueue.class, BlockingQueue.class);
+      config.set("hadoop.security.authorization", "true");
+
+      FileSystem.setDefaultUri(config, "hdfs://localhost:" + nnPort);
+      fs = FileSystem.get(config);
+      
+      try {
+        cluster = new MiniDFSCluster.Builder(config).nameNodePort(nnPort).build();
+        cluster.waitActive();
+        break;
+      } catch (BindException be) {
+        // Retry with a different port number.
+      }
+    }
+    
+    if (portRetries == 0) {
+      // Bail if we get very unlucky with our choice of ports.
+      fail("Failed to pick an ephemeral port for the NameNode RPC server.");
+    }
   }
 
   @After

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1615844&r1=1615843&r2=1615844&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Tue Aug  5 02:30:54 2014
@@ -2353,8 +2353,8 @@ public class MiniDFSCluster {
    * @return data file corresponding to the block
    */
   public static File getBlockFile(File storageDir, ExtendedBlock blk) {
-    return new File(getFinalizedDir(storageDir, blk.getBlockPoolId()), 
-        blk.getBlockName());
+    return new File(DatanodeUtil.idToBlockDir(getFinalizedDir(storageDir,
+        blk.getBlockPoolId()), blk.getBlockId()), blk.getBlockName());
   }
 
   /**
@@ -2364,10 +2364,32 @@ public class MiniDFSCluster {
    * @return metadata file corresponding to the block
    */
   public static File getBlockMetadataFile(File storageDir, ExtendedBlock blk) {
-    return new File(getFinalizedDir(storageDir, blk.getBlockPoolId()), 
-        blk.getBlockName() + "_" + blk.getGenerationStamp() +
-        Block.METADATA_EXTENSION);
-    
+    return new File(DatanodeUtil.idToBlockDir(getFinalizedDir(storageDir,
+        blk.getBlockPoolId()), blk.getBlockId()), blk.getBlockName() + "_" +
+        blk.getGenerationStamp() + Block.METADATA_EXTENSION);
+  }
+
+  /**
+   * Return all block metadata files in the given directory (recursive search).
+   */
+  public static List<File> getAllBlockMetadataFiles(File storageDir) {
+    List<File> results = new ArrayList<File>();
+    File[] files = storageDir.listFiles();
+    if (files == null) {
+      return null;
+    }
+    for (File f : files) {
+      if (f.getName().startsWith("blk_") && f.getName().endsWith(
+          Block.METADATA_EXTENSION)) {
+        results.add(f);
+      } else if (f.isDirectory()) {
+        List<File> subdirResults = getAllBlockMetadataFiles(f);
+        if (subdirResults != null) {
+          results.addAll(subdirResults);
+        }
+      }
+    }
+    return results;
   }
 
   /**

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java?rev=1615844&r1=1615843&r2=1615844&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java Tue Aug  5 02:30:54 2014
@@ -52,6 +52,7 @@ import org.apache.hadoop.io.retry.Defaul
 import org.apache.hadoop.io.retry.FailoverProxyProvider;
 import org.apache.hadoop.net.ConnectTimeoutException;
 import org.apache.hadoop.net.StandardSocketFactory;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
@@ -89,6 +90,11 @@ public class TestDFSClientFailover {
     cluster.shutdown();
   }
 
+  @After
+  public void clearConfig() {
+    SecurityUtil.setTokenServiceUseIp(true);
+  }
+
   /**
    * Make sure that client failover works when an active NN dies and the standby
    * takes over.
@@ -323,6 +329,7 @@ public class TestDFSClientFailover {
   /**
    * Test to verify legacy proxy providers are correctly wrapped.
    */
+  @Test
   public void testWrappedFailoverProxyProvider() throws Exception {
     // setup the config with the dummy provider class
     Configuration config = new HdfsConfiguration(conf);
@@ -332,6 +339,9 @@ public class TestDFSClientFailover {
         DummyLegacyFailoverProxyProvider.class.getName());
     Path p = new Path("hdfs://" + logicalName + "/");
 
+    // not to use IP address for token service
+    SecurityUtil.setTokenServiceUseIp(false);
+
     // Logical URI should be used.
     assertTrue("Legacy proxy providers should use logical URI.",
         HAUtil.useLogicalUri(config, p.toUri()));
@@ -340,6 +350,7 @@ public class TestDFSClientFailover {
   /**
    * Test to verify IPFailoverProxyProvider is not requiring logical URI.
    */
+  @Test
   public void testIPFailoverProxyProviderLogicalUri() throws Exception {
     // setup the config with the IP failover proxy provider class
     Configuration config = new HdfsConfiguration(conf);

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java?rev=1615844&r1=1615843&r2=1615844&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java Tue Aug  5 02:30:54 2014
@@ -79,8 +79,8 @@ public class TestDFSFinalize {
     File dnCurDirs[] = new File[dataNodeDirs.length];
     for (int i = 0; i < dataNodeDirs.length; i++) {
       dnCurDirs[i] = new File(dataNodeDirs[i],"current");
-      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, dnCurDirs[i]),
-                   UpgradeUtilities.checksumMasterDataNodeContents());
+      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, dnCurDirs[i],
+              false), UpgradeUtilities.checksumMasterDataNodeContents());
     }
     for (int i = 0; i < nameNodeDirs.length; i++) {
       assertFalse(new File(nameNodeDirs[i],"previous").isDirectory());
@@ -96,8 +96,9 @@ public class TestDFSFinalize {
         assertFalse(new File(bpRoot,"previous").isDirectory());
         
         File bpCurFinalizeDir = new File(bpRoot,"current/"+DataStorage.STORAGE_DIR_FINALIZED);
-        assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, bpCurFinalizeDir),
-                     UpgradeUtilities.checksumMasterBlockPoolFinalizedContents());
+        assertEquals(UpgradeUtilities.checksumContents(DATA_NODE,
+                bpCurFinalizeDir, true),
+                UpgradeUtilities.checksumMasterBlockPoolFinalizedContents());
       }
     }
   }

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java?rev=1615844&r1=1615843&r2=1615844&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java Tue Aug  5 02:30:54 2014
@@ -20,8 +20,11 @@ package org.apache.hadoop.hdfs;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Random;
@@ -36,6 +39,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Time;
@@ -421,6 +425,79 @@ public class TestDFSPermission {
     }
   }
 
+  @Test
+  public void testAccessOwner() throws IOException, InterruptedException {
+    FileSystem rootFs = FileSystem.get(conf);
+    Path p1 = new Path("/p1");
+    rootFs.mkdirs(p1);
+    rootFs.setOwner(p1, USER1_NAME, GROUP1_NAME);
+    fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
+      @Override
+      public FileSystem run() throws Exception {
+        return FileSystem.get(conf);
+      }
+    });
+    fs.setPermission(p1, new FsPermission((short) 0444));
+    fs.access(p1, FsAction.READ);
+    try {
+      fs.access(p1, FsAction.WRITE);
+      fail("The access call should have failed.");
+    } catch (AccessControlException e) {
+      // expected
+    }
+
+    Path badPath = new Path("/bad/bad");
+    try {
+      fs.access(badPath, FsAction.READ);
+      fail("The access call should have failed");
+    } catch (FileNotFoundException e) {
+      // expected
+    }
+  }
+
+  @Test
+  public void testAccessGroupMember() throws IOException, InterruptedException {
+    FileSystem rootFs = FileSystem.get(conf);
+    Path p2 = new Path("/p2");
+    rootFs.mkdirs(p2);
+    rootFs.setOwner(p2, UserGroupInformation.getCurrentUser().getShortUserName(), GROUP1_NAME);
+    rootFs.setPermission(p2, new FsPermission((short) 0740));
+    fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
+      @Override
+      public FileSystem run() throws Exception {
+        return FileSystem.get(conf);
+      }
+    });
+    fs.access(p2, FsAction.READ);
+    try {
+      fs.access(p2, FsAction.EXECUTE);
+      fail("The access call should have failed.");
+    } catch (AccessControlException e) {
+      // expected
+    }
+  }
+
+  @Test
+  public void testAccessOthers() throws IOException, InterruptedException {
+    FileSystem rootFs = FileSystem.get(conf);
+    Path p3 = new Path("/p3");
+    rootFs.mkdirs(p3);
+    rootFs.setPermission(p3, new FsPermission((short) 0774));
+    fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
+      @Override
+      public FileSystem run() throws Exception {
+        return FileSystem.get(conf);
+      }
+    });
+    fs.access(p3, FsAction.READ);
+    try {
+      fs.access(p3, FsAction.READ_WRITE);
+      fail("The access call should have failed.");
+    } catch (AccessControlException e) {
+      // expected
+    }
+  }
+
   /* Check if namenode performs permission checking correctly 
    * for the given user for operations mkdir, open, setReplication, 
    * getFileInfo, isDirectory, exists, getContentLength, list, rename,

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java?rev=1615844&r1=1615843&r2=1615844&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java Tue Aug  5 02:30:54 2014
@@ -81,7 +81,7 @@ public class TestDFSRollback {
         break;
       case DATA_NODE:
         assertEquals(
-            UpgradeUtilities.checksumContents(nodeType, curDir),
+            UpgradeUtilities.checksumContents(nodeType, curDir, false),
             UpgradeUtilities.checksumMasterDataNodeContents());
         break;
       }

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java?rev=1615844&r1=1615843&r2=1615844&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java Tue Aug  5 02:30:54 2014
@@ -239,7 +239,7 @@ public class TestDFSStorageStateRecovery
         assertTrue(new File(baseDirs[i],"previous").isDirectory());
         assertEquals(
                      UpgradeUtilities.checksumContents(
-                                                       NAME_NODE, new File(baseDirs[i],"previous")),
+                     NAME_NODE, new File(baseDirs[i],"previous"), false),
                      UpgradeUtilities.checksumMasterNameNodeContents());
       }
     }
@@ -259,7 +259,8 @@ public class TestDFSStorageStateRecovery
     if (currentShouldExist) {
       for (int i = 0; i < baseDirs.length; i++) {
         assertEquals(
-                     UpgradeUtilities.checksumContents(DATA_NODE, new File(baseDirs[i],"current")),
+                     UpgradeUtilities.checksumContents(DATA_NODE,
+                     new File(baseDirs[i],"current"), false),
                      UpgradeUtilities.checksumMasterDataNodeContents());
       }
     }
@@ -267,7 +268,8 @@ public class TestDFSStorageStateRecovery
       for (int i = 0; i < baseDirs.length; i++) {
         assertTrue(new File(baseDirs[i],"previous").isDirectory());
         assertEquals(
-                     UpgradeUtilities.checksumContents(DATA_NODE, new File(baseDirs[i],"previous")),
+                     UpgradeUtilities.checksumContents(DATA_NODE,
+                     new File(baseDirs[i],"previous"), false),
                      UpgradeUtilities.checksumMasterDataNodeContents());
       }
     }
@@ -290,8 +292,8 @@ public class TestDFSStorageStateRecovery
     if (currentShouldExist) {
       for (int i = 0; i < baseDirs.length; i++) {
         File bpCurDir = new File(baseDirs[i], Storage.STORAGE_DIR_CURRENT);
-        assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, bpCurDir),
-                     UpgradeUtilities.checksumMasterBlockPoolContents());
+        assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, bpCurDir,
+                false), UpgradeUtilities.checksumMasterBlockPoolContents());
       }
     }
     if (previousShouldExist) {
@@ -299,8 +301,8 @@ public class TestDFSStorageStateRecovery
         File bpPrevDir = new File(baseDirs[i], Storage.STORAGE_DIR_PREVIOUS);
         assertTrue(bpPrevDir.isDirectory());
         assertEquals(
-                     UpgradeUtilities.checksumContents(DATA_NODE, bpPrevDir),
-                     UpgradeUtilities.checksumMasterBlockPoolContents());
+                     UpgradeUtilities.checksumContents(DATA_NODE, bpPrevDir,
+                     false), UpgradeUtilities.checksumMasterBlockPoolContents());
       }
     }
   }

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java?rev=1615844&r1=1615843&r2=1615844&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java Tue Aug  5 02:30:54 2014
@@ -100,7 +100,7 @@ public class TestDFSUpgrade {
       
       File previous = new File(baseDir, "previous");
       assertExists(previous);
-      assertEquals(UpgradeUtilities.checksumContents(NAME_NODE, previous),
+      assertEquals(UpgradeUtilities.checksumContents(NAME_NODE, previous, false),
           UpgradeUtilities.checksumMasterNameNodeContents());
     }
   }
@@ -114,23 +114,25 @@ public class TestDFSUpgrade {
   void checkDataNode(String[] baseDirs, String bpid) throws IOException {
     for (int i = 0; i < baseDirs.length; i++) {
       File current = new File(baseDirs[i], "current/" + bpid + "/current");
-      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, current),
+      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, current, false),
         UpgradeUtilities.checksumMasterDataNodeContents());
       
       // block files are placed under <sd>/current/<bpid>/current/finalized
       File currentFinalized = 
         MiniDFSCluster.getFinalizedDir(new File(baseDirs[i]), bpid);
-      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, currentFinalized),
+      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE,
+          currentFinalized, true),
           UpgradeUtilities.checksumMasterBlockPoolFinalizedContents());
       
       File previous = new File(baseDirs[i], "current/" + bpid + "/previous");
       assertTrue(previous.isDirectory());
-      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, previous),
+      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, previous, false),
           UpgradeUtilities.checksumMasterDataNodeContents());
       
       File previousFinalized = 
         new File(baseDirs[i], "current/" + bpid + "/previous"+"/finalized");
-      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, previousFinalized),
+      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE,
+          previousFinalized, true),
           UpgradeUtilities.checksumMasterBlockPoolFinalizedContents());
       
     }

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java?rev=1615844&r1=1615843&r2=1615844&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java Tue Aug  5 02:30:54 2014
@@ -24,6 +24,7 @@ import static org.junit.Assert.fail;
 
 import java.io.BufferedReader;
 import java.io.File;
+import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.FileReader;
 import java.io.IOException;
@@ -80,7 +81,7 @@ public class TestDFSUpgradeFromImage {
     long checksum;
   }
   
-  private static final Configuration upgradeConf;
+  static final Configuration upgradeConf;
   
   static {
     upgradeConf = new HdfsConfiguration();
@@ -95,7 +96,7 @@ public class TestDFSUpgradeFromImage {
   
   boolean printChecksum = false;
   
-  private void unpackStorage(String tarFileName)
+  void unpackStorage(String tarFileName, String referenceName)
       throws IOException {
     String tarFile = System.getProperty("test.cache.data", "build/test/cache")
         + "/" + tarFileName;
@@ -110,7 +111,7 @@ public class TestDFSUpgradeFromImage {
     
     BufferedReader reader = new BufferedReader(new FileReader(
         System.getProperty("test.cache.data", "build/test/cache")
-            + "/" + HADOOP_DFS_DIR_TXT));
+            + "/" + referenceName));
     String line;
     while ( (line = reader.readLine()) != null ) {
       
@@ -285,7 +286,7 @@ public class TestDFSUpgradeFromImage {
    */
   @Test
   public void testUpgradeFromRel22Image() throws IOException {
-    unpackStorage(HADOOP22_IMAGE);
+    unpackStorage(HADOOP22_IMAGE, HADOOP_DFS_DIR_TXT);
     upgradeAndVerify(new MiniDFSCluster.Builder(upgradeConf).
         numDataNodes(4));
   }
@@ -296,7 +297,7 @@ public class TestDFSUpgradeFromImage {
    */
   @Test
   public void testUpgradeFromCorruptRel22Image() throws IOException {
-    unpackStorage(HADOOP22_IMAGE);
+    unpackStorage(HADOOP22_IMAGE, HADOOP_DFS_DIR_TXT);
     
     // Overwrite the md5 stored in the VERSION files
     File baseDir = new File(MiniDFSCluster.getBaseDirectory());
@@ -333,7 +334,7 @@ public class TestDFSUpgradeFromImage {
    */
   @Test
   public void testUpgradeFromRel1ReservedImage() throws Exception {
-    unpackStorage(HADOOP1_RESERVED_IMAGE);
+    unpackStorage(HADOOP1_RESERVED_IMAGE, HADOOP_DFS_DIR_TXT);
     MiniDFSCluster cluster = null;
     // Try it once without setting the upgrade flag to ensure it fails
     final Configuration conf = new Configuration();
@@ -403,7 +404,7 @@ public class TestDFSUpgradeFromImage {
    */
   @Test
   public void testUpgradeFromRel023ReservedImage() throws Exception {
-    unpackStorage(HADOOP023_RESERVED_IMAGE);
+    unpackStorage(HADOOP023_RESERVED_IMAGE, HADOOP_DFS_DIR_TXT);
     MiniDFSCluster cluster = null;
     // Try it once without setting the upgrade flag to ensure it fails
     final Configuration conf = new Configuration();
@@ -468,7 +469,7 @@ public class TestDFSUpgradeFromImage {
    */
   @Test
   public void testUpgradeFromRel2ReservedImage() throws Exception {
-    unpackStorage(HADOOP2_RESERVED_IMAGE);
+    unpackStorage(HADOOP2_RESERVED_IMAGE, HADOOP_DFS_DIR_TXT);
     MiniDFSCluster cluster = null;
     // Try it once without setting the upgrade flag to ensure it fails
     final Configuration conf = new Configuration();
@@ -572,7 +573,7 @@ public class TestDFSUpgradeFromImage {
     } while (dirList.hasMore());
   }
   
-  private void upgradeAndVerify(MiniDFSCluster.Builder bld)
+  void upgradeAndVerify(MiniDFSCluster.Builder bld)
       throws IOException {
     MiniDFSCluster cluster = null;
     try {
@@ -601,7 +602,7 @@ public class TestDFSUpgradeFromImage {
    */
   @Test
   public void testUpgradeFromRel1BBWImage() throws IOException {
-    unpackStorage(HADOOP1_BBW_IMAGE);
+    unpackStorage(HADOOP1_BBW_IMAGE, HADOOP_DFS_DIR_TXT);
     Configuration conf = new Configuration(upgradeConf);
     conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, 
         System.getProperty("test.build.data") + File.separator + 

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java?rev=1615844&r1=1615843&r2=1615844&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java Tue Aug  5 02:30:54 2014
@@ -445,19 +445,14 @@ public class TestDatanodeBlockScanner {
   
   @Test
   public void testReplicaInfoParsing() throws Exception {
-    testReplicaInfoParsingSingle(BASE_PATH, new int[0]);
-    testReplicaInfoParsingSingle(BASE_PATH + "/subdir1", new int[]{1});
-    testReplicaInfoParsingSingle(BASE_PATH + "/subdir43", new int[]{43});
-    testReplicaInfoParsingSingle(BASE_PATH + "/subdir1/subdir2/subdir3", new int[]{1, 2, 3});
-    testReplicaInfoParsingSingle(BASE_PATH + "/subdir1/subdir2/subdir43", new int[]{1, 2, 43});
-    testReplicaInfoParsingSingle(BASE_PATH + "/subdir1/subdir23/subdir3", new int[]{1, 23, 3});
-    testReplicaInfoParsingSingle(BASE_PATH + "/subdir13/subdir2/subdir3", new int[]{13, 2, 3});
+    testReplicaInfoParsingSingle(BASE_PATH);
+    testReplicaInfoParsingSingle(BASE_PATH + "/subdir1");
+    testReplicaInfoParsingSingle(BASE_PATH + "/subdir1/subdir2/subdir3");
   }
   
-  private static void testReplicaInfoParsingSingle(String subDirPath, int[] expectedSubDirs) {
+  private static void testReplicaInfoParsingSingle(String subDirPath) {
     File testFile = new File(subDirPath);
-    assertArrayEquals(expectedSubDirs, ReplicaInfo.parseSubDirs(testFile).subDirs);
-    assertEquals(BASE_PATH, ReplicaInfo.parseSubDirs(testFile).baseDirPath);
+    assertEquals(BASE_PATH, ReplicaInfo.parseBaseDir(testFile).baseDirPath);
   }
 
   @Test

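The trimmed test drops the subdirectory-index assertions; ReplicaInfo.parseBaseDir now recovers only the base directory. For readers wondering what the removed expectations encoded: datanode replicas live under nested "subdirN" directories, and the old parseSubDirs decoded those N values. A standalone sketch of that decoding (a hypothetical helper under those assumptions, not the ReplicaInfo API):

    import java.io.File;
    import java.util.ArrayList;
    import java.util.List;

    class SubDirParser {
      // Collect the numeric suffixes of each "subdirN" path component between
      // basePath and the given directory, outermost first.
      // e.g. base=/data, dir=/data/subdir1/subdir23 -> [1, 23]
      static List<Integer> subDirIndices(File dir, String basePath) {
        List<Integer> indices = new ArrayList<Integer>();
        for (File d = dir; d != null && !d.getPath().equals(basePath);
            d = d.getParentFile()) {
          String name = d.getName();
          if (name.startsWith("subdir")) {
            indices.add(0, Integer.valueOf(name.substring("subdir".length())));
          }
        }
        return indices;
      }
    }
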
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java?rev=1615844&r1=1615843&r2=1615844&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java Tue Aug  5 02:30:54 2014
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -32,6 +33,7 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.HardLink;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -39,6 +41,7 @@ import org.apache.hadoop.hdfs.protocol.L
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
 import org.junit.Assert;
 import org.junit.Test;
@@ -169,6 +172,7 @@ public class TestFileAppend{
       }
 
     } finally {
+      client.close();
       fs.close();
       cluster.shutdown();
     }
@@ -380,4 +384,57 @@ public class TestFileAppend{
     }
   }
 
+  /**
+   * An old replica of the block should not be accepted as valid for append or read.
+   */
+  @Test
+  public void testFailedAppendBlockRejection() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    conf.set("dfs.client.block.write.replace-datanode-on-failure.enable",
+        "false");
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
+        .build();
+    DistributedFileSystem fs = null;
+    try {
+      fs = cluster.getFileSystem();
+      Path path = new Path("/test");
+      FSDataOutputStream out = fs.create(path);
+      out.writeBytes("hello\n");
+      out.close();
+
+      // stop one datanode
+      DataNodeProperties dnProp = cluster.stopDataNode(0);
+      String dnAddress = dnProp.datanode.getXferAddress().toString();
+      if (dnAddress.startsWith("/")) {
+        dnAddress = dnAddress.substring(1);
+      }
+
+      // append again to bump genstamps
+      for (int i = 0; i < 2; i++) {
+        out = fs.append(path);
+        out.writeBytes("helloagain\n");
+        out.close();
+      }
+
+      // re-open the file so that the block goes into the under-construction state
+      out = fs.append(path);
+      cluster.restartDataNode(dnProp, true);
+      // wait until the block report from the restarted datanode arrives
+      Thread.sleep(2000);
+      // check the block locations; they should not include the restarted datanode
+      BlockLocation[] locations = fs.getFileBlockLocations(path, 0,
+          Long.MAX_VALUE);
+      String[] names = locations[0].getNames();
+      for (String node : names) {
+        if (node.equals(dnAddress)) {
+          fail("Failed append should not be present in latest block locations.");
+        }
+      }
+      out.close();
+    } finally {
+      IOUtils.closeStream(fs);
+      cluster.shutdown();
+    }
+  }
+
 }

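A side note on the fixed two-second sleep in the new test: polling for the condition is usually less timing-sensitive than a hard sleep. A sketch of the same wait expressed with GenericTestUtils.waitFor (org.apache.hadoop.test.GenericTestUtils plus com.google.common.base.Supplier; assumes the test's fs, path, and dnAddress locals are final so the anonymous class can capture them, and that the test method declares the TimeoutException waitFor can throw):

    // Poll every 100 ms, up to 10 s, until the restarted datanode has
    // dropped out of the reported block locations.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        try {
          BlockLocation[] locs =
              fs.getFileBlockLocations(path, 0, Long.MAX_VALUE);
          for (String name : locs[0].getNames()) {
            if (name.equals(dnAddress)) {
              return false; // stale replica still listed
            }
          }
          return true;
        } catch (IOException e) {
          return false;
        }
      }
    }, 100, 10000);
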
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java?rev=1615844&r1=1615843&r2=1615844&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java Tue Aug  5 02:30:54 2014
@@ -27,6 +27,7 @@ import java.io.DataOutputStream;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.util.ArrayList;
+import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -35,6 +36,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
@@ -137,13 +139,15 @@ public class TestFileCorruption {
       final String bpid = cluster.getNamesystem().getBlockPoolId();
       File storageDir = cluster.getInstanceStorageDir(0, 0);
       File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
+      assertTrue("Data directory does not exist", dataDir.exists());
       ExtendedBlock blk = getBlock(bpid, dataDir);
       if (blk == null) {
         storageDir = cluster.getInstanceStorageDir(0, 1);
         dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
         blk = getBlock(bpid, dataDir);
       }
-      assertFalse(blk==null);
+      assertFalse("Data directory does not contain any blocks or there was an "
+          + "IO error", blk==null);
 
       // start a third datanode
       cluster.startDataNodes(conf, 1, true, null, null);
@@ -174,33 +178,15 @@ public class TestFileCorruption {
     
   }
   
-  private ExtendedBlock getBlock(String bpid, File dataDir) {
-    assertTrue("data directory does not exist", dataDir.exists());
-    File[] blocks = dataDir.listFiles();
-    assertTrue("Blocks do not exist in dataDir", (blocks != null) && (blocks.length > 0));
-
-    int idx = 0;
-    String blockFileName = null;
-    for (; idx < blocks.length; idx++) {
-      blockFileName = blocks[idx].getName();
-      if (blockFileName.startsWith("blk_") && !blockFileName.endsWith(".meta")) {
-        break;
-      }
-    }
-    if (blockFileName == null) {
+  public static ExtendedBlock getBlock(String bpid, File dataDir) {
+    List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(dataDir);
+    if (metadataFiles == null || metadataFiles.isEmpty()) {
       return null;
     }
-    long blockId = Long.parseLong(blockFileName.substring("blk_".length()));
-    long blockTimeStamp = GenerationStamp.GRANDFATHER_GENERATION_STAMP;
-    for (idx=0; idx < blocks.length; idx++) {
-      String fileName = blocks[idx].getName();
-      if (fileName.startsWith(blockFileName) && fileName.endsWith(".meta")) {
-        int startIndex = blockFileName.length()+1;
-        int endIndex = fileName.length() - ".meta".length();
-        blockTimeStamp = Long.parseLong(fileName.substring(startIndex, endIndex));
-        break;
-      }
-    }
-    return new ExtendedBlock(bpid, blockId, blocks[idx].length(), blockTimeStamp);
+    File metadataFile = metadataFiles.get(0);
+    File blockFile = Block.metaToBlockFile(metadataFile);
+    return new ExtendedBlock(bpid, Block.getBlockId(blockFile.getName()),
+        blockFile.length(), Block.getGenerationStamp(metadataFile.getName()));
   }
+
 }

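The rewritten getBlock leans on the datanode's on-disk naming convention that Block.metaToBlockFile and friends encapsulate: a block file named blk_<id> sits next to a metadata file named blk_<id>_<genstamp>.meta. A self-contained sketch of that decoding, using plain string handling rather than the Block API:

    // "blk_1073741825_1001.meta" -> blockId=1073741825, genStamp=1001
    static long[] decodeMetaFileName(String metaName) {
      // strip the "blk_" prefix and ".meta" suffix, leaving "<id>_<genstamp>"
      String core = metaName.substring("blk_".length(),
          metaName.length() - ".meta".length());
      int sep = core.lastIndexOf('_');
      return new long[] {
          Long.parseLong(core.substring(0, sep)),      // block id
          Long.parseLong(core.substring(sep + 1)) };   // generation stamp
    }
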
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java?rev=1615844&r1=1615843&r2=1615844&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java Tue Aug  5 02:30:54 2014
@@ -26,6 +26,7 @@ import static org.junit.Assert.assertTru
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
@@ -36,6 +37,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
@@ -47,6 +49,8 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Before;
@@ -297,7 +301,8 @@ public class TestSafeMode {
    * assert that they are either allowed or fail as expected.
    */
   @Test
-  public void testOperationsWhileInSafeMode() throws IOException {
+  public void testOperationsWhileInSafeMode() throws IOException,
+      InterruptedException {
     final Path file1 = new Path("/file1");
 
     assertFalse(dfs.setSafeMode(SafeModeAction.SAFEMODE_GET));
@@ -407,6 +412,22 @@ public class TestSafeMode {
       fail("getAclStatus failed while in SM");
     }
 
+    // Test access
+    UserGroupInformation ugiX = UserGroupInformation.createRemoteUser("userX");
+    FileSystem myfs = ugiX.doAs(new PrivilegedExceptionAction<FileSystem>() {
+      @Override
+      public FileSystem run() throws IOException {
+        return FileSystem.get(conf);
+      }
+    });
+    myfs.access(file1, FsAction.READ);
+    try {
+      myfs.access(file1, FsAction.WRITE);
+      fail("The access call should have failed.");
+    } catch (AccessControlException e) {
+      // expected
+    }
+
     assertFalse("Could not leave SM",
         dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
   }

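The access() call exercised in the safe-mode test lets a client probe permissions without opening the file; on denial the NameNode raises AccessControlException. A minimal client-side sketch (assumes a reachable HDFS and a default-loaded Configuration; the path is illustrative):

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.security.AccessControlException;

    public class AccessProbe {
      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        Path p = new Path("/file1");  // illustrative path
        try {
          // throws AccessControlException if either bit is denied to the caller
          fs.access(p, FsAction.READ_WRITE);
          System.out.println("read/write permitted");
        } catch (AccessControlException e) {
          System.out.println("denied: " + e.getMessage());
        }
      }
    }
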
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java?rev=1615844&r1=1615843&r2=1615844&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java Tue Aug  5 02:30:54 2014
@@ -158,21 +158,23 @@ public class UpgradeUtilities {
       FileUtil.fullyDelete(new File(datanodeStorage,"in_use.lock"));
     }
     namenodeStorageChecksum = checksumContents(NAME_NODE, 
-        new File(namenodeStorage, "current"));
+        new File(namenodeStorage, "current"), false);
     File dnCurDir = new File(datanodeStorage, "current");
-    datanodeStorageChecksum = checksumContents(DATA_NODE, dnCurDir);
+    datanodeStorageChecksum = checksumContents(DATA_NODE, dnCurDir, false);
     
     File bpCurDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir),
         "current");
-    blockPoolStorageChecksum = checksumContents(DATA_NODE, bpCurDir);
+    blockPoolStorageChecksum = checksumContents(DATA_NODE, bpCurDir, false);
     
     File bpCurFinalizeDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir),
         "current/"+DataStorage.STORAGE_DIR_FINALIZED);
-    blockPoolFinalizedStorageChecksum = checksumContents(DATA_NODE, bpCurFinalizeDir);
+    blockPoolFinalizedStorageChecksum = checksumContents(DATA_NODE,
+        bpCurFinalizeDir, true);
     
     File bpCurRbwDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir),
         "current/"+DataStorage.STORAGE_DIR_RBW);
-    blockPoolRbwStorageChecksum = checksumContents(DATA_NODE, bpCurRbwDir);
+    blockPoolRbwStorageChecksum = checksumContents(DATA_NODE, bpCurRbwDir,
+        false);
   }
   
   // Private helper method that writes a file to the given file system.
@@ -266,36 +268,47 @@ public class UpgradeUtilities {
   
   /**
    * Compute the checksum of all the files in the specified directory.
-   * The contents of subdirectories are not included. This method provides
-   * an easy way to ensure equality between the contents of two directories.
+   * This method provides an easy way to ensure equality between the contents
+   * of two directories.
    *
    * @param nodeType if DATA_NODE then any file named "VERSION" is ignored.
    *    This is because this file is changed every time
    *    the Datanode is started.
-   * @param dir must be a directory. Subdirectories are ignored.
+   * @param dir must be a directory
+   * @param recursive whether or not to consider subdirectories
    *
    * @throws IllegalArgumentException if specified directory is not a directory
    * @throws IOException if an IOException occurs while reading the files
    * @return the computed checksum value
    */
-  public static long checksumContents(NodeType nodeType, File dir) throws IOException {
+  public static long checksumContents(NodeType nodeType, File dir,
+      boolean recursive) throws IOException {
+    CRC32 checksum = new CRC32();
+    checksumContentsHelper(nodeType, dir, checksum, recursive);
+    return checksum.getValue();
+  }
+
+  public static void checksumContentsHelper(NodeType nodeType, File dir,
+      CRC32 checksum, boolean recursive) throws IOException {
     if (!dir.isDirectory()) {
       throw new IllegalArgumentException(
-                                         "Given argument is not a directory:" + dir);
+          "Given argument is not a directory:" + dir);
     }
     File[] list = dir.listFiles();
     Arrays.sort(list);
-    CRC32 checksum = new CRC32();
     for (int i = 0; i < list.length; i++) {
       if (!list[i].isFile()) {
+        if (recursive) {
+          checksumContentsHelper(nodeType, list[i], checksum, recursive);
+        }
         continue;
       }
 
       // skip VERSION and dfsUsed files for DataNodes
-      if (nodeType == DATA_NODE && 
-         (list[i].getName().equals("VERSION") || 
-         list[i].getName().equals("dfsUsed"))) {
-        continue; 
+      if (nodeType == DATA_NODE &&
+          (list[i].getName().equals("VERSION") ||
+              list[i].getName().equals("dfsUsed"))) {
+        continue;
       }
 
       FileInputStream fis = null;
@@ -312,7 +325,6 @@ public class UpgradeUtilities {
         }
       }
     }
-    return checksum.getValue();
   }
   
   /**

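With the recursive flag added above, a caller can fingerprint an entire tree in one call, which matters for the finalized block-pool directory now that its replicas fan out across subdirectories. A usage sketch with the new signature (the bpFinalizedDir variable is illustrative):

    // The finalized tree should checksum identically across an upgrade.
    long before = UpgradeUtilities.checksumContents(DATA_NODE, bpFinalizedDir, true);
    // ... perform the upgrade under test ...
    long after = UpgradeUtilities.checksumContents(DATA_NODE, bpFinalizedDir, true);
    assertEquals("finalized contents changed across upgrade", before, after);
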
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java?rev=1615844&r1=1615843&r2=1615844&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java Tue Aug  5 02:30:54 2014
@@ -184,8 +184,10 @@ public class TestPBHelper {
   private static BlockWithLocations getBlockWithLocations(int bid) {
     final String[] datanodeUuids = {"dn1", "dn2", "dn3"};
     final String[] storageIDs = {"s1", "s2", "s3"};
+    final StorageType[] storageTypes = {
+        StorageType.DISK, StorageType.DISK, StorageType.DISK};
     return new BlockWithLocations(new Block(bid, 0, 1),
-        datanodeUuids, storageIDs);
+        datanodeUuids, storageIDs, storageTypes);
   }
 
   private void compare(BlockWithLocations locs1, BlockWithLocations locs2) {