Posted to hdfs-commits@hadoop.apache.org by at...@apache.org on 2011/11/02 06:35:26 UTC

svn commit: r1196458 [8/9] - in /hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/bin/ src/main/java/ src/main/java/org/apache/hadoop/fs/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/protocol/ ...

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LongParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LongParam.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LongParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LongParam.java Wed Nov  2 05:34:31 2011
@@ -19,8 +19,23 @@ package org.apache.hadoop.hdfs.web.resou
 
 /** Long parameter. */
 abstract class LongParam extends Param<Long, LongParam.Domain> {
-  LongParam(final Domain domain, final Long value) {
+  LongParam(final Domain domain, final Long value, final Long min, final Long max) {
     super(domain, value);
+    checkRange(min, max);
+  }
+
+  private void checkRange(final Long min, final Long max) {
+    if (value == null) {
+      return;
+    }
+    if (min != null && value < min) {
+      throw new IllegalArgumentException("Invalid parameter range: " + getName()
+          + " = " + domain.toString(value) + " < " + domain.toString(min));
+    }
+    if (max != null && value > max) {
+      throw new IllegalArgumentException("Invalid parameter range: " + getName()
+          + " = " + domain.toString(value) + " > " + domain.toString(max));
+    }
   }
   
   @Override
@@ -49,7 +64,12 @@ abstract class LongParam extends Param<L
 
     @Override
     Long parse(final String str) {
-      return NULL.equals(str)? null: Long.parseLong(str, radix);
+      try {
+        return NULL.equals(str)? null: Long.parseLong(str, radix);
+      } catch(NumberFormatException e) {
+        throw new IllegalArgumentException("Failed to parse \"" + str
+            + "\" as a radix-" + radix + " long integer.", e);
+      }
     }
 
     /** Convert a Long to a String. */

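The new checkRange above rejects out-of-range values at construction time rather than letting them flow into request handling. A minimal sketch of the behavior, using a hypothetical LongParam subclass (FooParam and its domain are illustrative only, not part of this commit):

    // Hypothetical subclass showing the new range validation.
    class FooParam extends LongParam {
      private static final Domain DOMAIN = new Domain("foo", 10); // radix-10
      FooParam(final Long value) {
        super(DOMAIN, value, 0L, 100L); // valid range: [0, 100]
      }
      @Override
      public String getName() { return "foo"; }
    }

    new FooParam(50L);  // ok
    new FooParam(null); // ok: null values skip the range check
    new FooParam(-1L);  // IllegalArgumentException, roughly:
                        //   "Invalid parameter range: foo = -1 < 0"
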
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java Wed Nov  2 05:34:31 2011
@@ -31,7 +31,7 @@ public class ModificationTimeParam exten
    * @param value the parameter value.
    */
   public ModificationTimeParam(final Long value) {
-    super(DOMAIN, value);
+    super(DOMAIN, value, -1L, null);
   }
 
   /**

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java Wed Nov  2 05:34:31 2011
@@ -31,7 +31,7 @@ public class OffsetParam extends LongPar
    * @param value the parameter value.
    */
   public OffsetParam(final Long value) {
-    super(DOMAIN, value);
+    super(DOMAIN, value, 0L, null);
   }
 
   /**

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PermissionParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PermissionParam.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PermissionParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PermissionParam.java Wed Nov  2 05:34:31 2011
@@ -27,13 +27,15 @@ public class PermissionParam extends Sho
   public static final String DEFAULT = NULL;
 
   private static final Domain DOMAIN = new Domain(NAME, 8);
+
+  private static final short DEFAULT_PERMISSION = 0755;
   
   /**
    * Constructor.
    * @param value the parameter value.
    */
   public PermissionParam(final FsPermission value) {
-    super(DOMAIN, value == null? null: value.toShort());
+    super(DOMAIN, value == null? null: value.toShort(), null, null);
   }
 
   /**
@@ -41,7 +43,7 @@ public class PermissionParam extends Sho
    * @param str a string representation of the parameter value.
    */
   public PermissionParam(final String str) {
-    super(DOMAIN, DOMAIN.parse(str));
+    super(DOMAIN, DOMAIN.parse(str), (short)0, (short)01777);
   }
 
   @Override
@@ -51,7 +53,7 @@ public class PermissionParam extends Sho
 
   /** @return the represented FsPermission. */
   public FsPermission getFsPermission() {
-    final Short mode = getValue();
-    return mode == null? FsPermission.getDefault(): new FsPermission(mode);
+    final Short v = getValue();
+    return new FsPermission(v != null? v: DEFAULT_PERMISSION);
   }
 }
\ No newline at end of file

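Two behavioral changes land in PermissionParam: the string constructor now bounds the parsed octal mode to [0, 01777], and a missing value defaults to 0755 instead of FsPermission.getDefault(). A short sketch of the resulting behavior (values illustrative; the domain parses radix-8 strings):

    new PermissionParam("755").getFsPermission();   // rwxr-xr-x
    new PermissionParam("1777").getFsPermission();  // max allowed (sticky bit set)
    new PermissionParam("2000");                    // > 01777: IllegalArgumentException
    new PermissionParam(PermissionParam.DEFAULT)    // the "null" marker string
        .getFsPermission();                         // -> new FsPermission((short)0755)
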
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java Wed Nov  2 05:34:31 2011
@@ -33,6 +33,9 @@ public class PutOpParam extends HttpOpPa
     SETPERMISSION(false, HttpURLConnection.HTTP_OK),
     SETTIMES(false, HttpURLConnection.HTTP_OK),
     
+    RENEWDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK),
+    CANCELDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK),
+    
     NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
 
     final boolean doOutput;

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ReplicationParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ReplicationParam.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ReplicationParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ReplicationParam.java Wed Nov  2 05:34:31 2011
@@ -17,6 +17,11 @@
  */
 package org.apache.hadoop.hdfs.web.resources;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+
+import org.apache.hadoop.conf.Configuration;
+
 /** Replication parameter. */
 public class ReplicationParam extends ShortParam {
   /** Parameter name. */
@@ -31,7 +36,7 @@ public class ReplicationParam extends Sh
    * @param value the parameter value.
    */
   public ReplicationParam(final Short value) {
-    super(DOMAIN, value);
+    super(DOMAIN, value, (short)1, null);
   }
 
   /**
@@ -46,4 +51,10 @@ public class ReplicationParam extends Sh
   public String getName() {
     return NAME;
   }
+
+  /** @return the value or, if it is null, return the default from conf. */
+  public short getValue(final Configuration conf) {
+    return getValue() != null? getValue()
+        : (short)conf.getInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT);
+  }
 }
\ No newline at end of file

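The new getValue(Configuration) overload resolves an unset replication against dfs.replication, so callers no longer need to special-case null. A minimal sketch (assuming the stock default of dfs.replication = 3):

    Configuration conf = new Configuration();
    new ReplicationParam((short)2).getValue(conf);    // 2: explicit value wins
    new ReplicationParam((Short)null).getValue(conf); // 3: falls back to conf
    new ReplicationParam((short)0);                   // < min of 1: IllegalArgumentException
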
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ShortParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ShortParam.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ShortParam.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ShortParam.java Wed Nov  2 05:34:31 2011
@@ -19,8 +19,24 @@ package org.apache.hadoop.hdfs.web.resou
 
 /** Short parameter. */
 abstract class ShortParam extends Param<Short, ShortParam.Domain> {
-  ShortParam(final Domain domain, final Short value) {
+  ShortParam(final Domain domain, final Short value,
+      final Short min, final Short max) {
     super(domain, value);
+    checkRange(min, max);
+  }
+
+  private void checkRange(final Short min, final Short max) {
+    if (value == null) {
+      return;
+    }
+    if (min != null && value < min) {
+      throw new IllegalArgumentException("Invalid parameter range: " + getName()
+          + " = " + domain.toString(value) + " < " + domain.toString(min));
+    }
+    if (max != null && value > max) {
+      throw new IllegalArgumentException("Invalid parameter range: " + getName()
+          + " = " + domain.toString(value) + " > " + domain.toString(max));
+    }
   }
   
   @Override
@@ -49,7 +65,12 @@ abstract class ShortParam extends Param<
 
     @Override
     Short parse(final String str) {
-      return NULL.equals(str)? null: Short.parseShort(str, radix);
+      try {
+        return NULL.equals(str)? null: Short.parseShort(str, radix);
+      } catch(NumberFormatException e) {
+        throw new IllegalArgumentException("Failed to parse \"" + str
+            + "\" as a radix-" + radix + " short integer.", e);
+      }
     }
 
     /** Convert a Short to a String. */ 

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserProvider.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserProvider.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserProvider.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserProvider.java Wed Nov  2 05:34:31 2011
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.web.resou
 import java.io.IOException;
 import java.lang.reflect.Type;
 
+import javax.servlet.ServletContext;
 import javax.servlet.http.HttpServletRequest;
 import javax.ws.rs.core.Context;
 import javax.ws.rs.ext.Provider;
@@ -42,13 +43,14 @@ public class UserProvider
     extends AbstractHttpContextInjectable<UserGroupInformation>
     implements InjectableProvider<Context, Type> {
   @Context HttpServletRequest request;
+  @Context ServletContext servletcontext;
 
   @Override
   public UserGroupInformation getValue(final HttpContext context) {
-    final Configuration conf = (Configuration)context.getProperties().get(
-        JspHelper.CURRENT_CONF);
+    final Configuration conf = (Configuration) servletcontext
+        .getAttribute(JspHelper.CURRENT_CONF);
     try {
-      return JspHelper.getUGI(null, request, conf,
+      return JspHelper.getUGI(servletcontext, request, conf,
           AuthenticationMethod.KERBEROS, false);
     } catch (IOException e) {
       throw new RuntimeException(e);

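Since UserProvider now pulls the Configuration from a ServletContext attribute instead of the per-request HttpContext properties, the embedding web server has to publish it under JspHelper.CURRENT_CONF at startup. A hedged sketch of that wiring (the listener class is illustrative, not part of this commit):

    import javax.servlet.ServletContextEvent;
    import javax.servlet.ServletContextListener;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.server.common.JspHelper;

    // Illustrative only: publish the Configuration where UserProvider
    // (and JspHelper.getUGI) can find it.
    public class ConfContextListener implements ServletContextListener {
      @Override
      public void contextInitialized(ServletContextEvent event) {
        event.getServletContext()
            .setAttribute(JspHelper.CURRENT_CONF, new Configuration());
      }
      @Override
      public void contextDestroyed(ServletContextEvent event) {
      }
    }
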
Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Nov  2 05:34:31 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:1152502-1179483
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:1152502-1196451
 /hadoop/core/branches/branch-0.19/hdfs/src/main/native:713112
 /hadoop/core/branches/branch-0.19/mapred/src/c++/libhdfs:713112
 /hadoop/core/trunk/src/c++/libhdfs:776175-784663

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Nov  2 05:34:31 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:1159757-1179483
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:1159757-1196451
 /hadoop/core/branches/branch-0.19/hdfs/src/main/webapps/datanode:713112
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/datanode:713112
 /hadoop/core/trunk/src/webapps/datanode:776175-784663

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Nov  2 05:34:31 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:1152502-1179483
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:1152502-1196451
 /hadoop/core/branches/branch-0.19/hdfs/src/main/webapps/hdfs:713112
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/hdfs:713112
 /hadoop/core/trunk/src/webapps/hdfs:776175-784663

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Nov  2 05:34:31 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:1152502-1179483
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:1152502-1196451
 /hadoop/core/branches/branch-0.19/hdfs/src/main/webapps/secondary:713112
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/secondary:713112
 /hadoop/core/trunk/src/webapps/secondary:776175-784663

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/proto/datatransfer.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/proto/datatransfer.proto?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/proto/datatransfer.proto (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/proto/datatransfer.proto Wed Nov  2 05:34:31 2011
@@ -40,6 +40,17 @@ message OpReadBlockProto {
   required uint64 offset = 2;
   required uint64 len = 3;
 }
+
+
+message ChecksumProto {
+  enum ChecksumType {
+    NULL = 0;
+    CRC32 = 1;
+    CRC32C = 2;
+  }
+  required ChecksumType type = 1;
+  required uint32 bytesPerChecksum = 2;
+}
   
 message OpWriteBlockProto {
   required ClientOperationHeaderProto header = 1;
@@ -69,6 +80,11 @@ message OpWriteBlockProto {
   required uint64 minBytesRcvd = 6;
   required uint64 maxBytesRcvd = 7;
   required uint64 latestGenerationStamp = 8;
+
+  /**
+   * The requested checksum mechanism for this block write.
+   */
+  required ChecksumProto requestedChecksum = 9;
 }
   
 message OpTransferBlockProto {
@@ -114,11 +130,30 @@ message PipelineAckProto {
   repeated Status status = 2;
 }
 
+/**
+ * Sent as part of the BlockOpResponseProto
+ * for READ_BLOCK and COPY_BLOCK operations.
+ */
+message ReadOpChecksumInfoProto {
+  required ChecksumProto checksum = 1;
+
+  /**
+   * The offset into the block at which the first packet
+   * will start. This is necessary since reads will align
+   * backwards to a checksum chunk boundary.
+   */
+  required uint64 chunkOffset = 2;
+}
+
 message BlockOpResponseProto {
   required Status status = 1;
 
   optional string firstBadLink = 2;
   optional OpBlockChecksumResponseProto checksumResponse = 3;
+  optional ReadOpChecksumInfoProto readOpChecksumInfo = 4;
+
+  /** explanatory text which may be useful to log on the client side */
+  optional string message = 5;
 }
 
 /**

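The ChecksumProto message replaces the ad-hoc (checksum-type byte, bytesPerChecksum int) pair that writeBlock used to serialize by hand; see the TestDataTransferProtocol changes below. A sketch of the generated protobuf-java builder, assuming the DataTransferProtos outer class used elsewhere in this commit:

    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto;
    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType;

    // Requested checksum for an OpWriteBlockProto (sketch).
    ChecksumProto checksum = ChecksumProto.newBuilder()
        .setType(ChecksumType.CRC32C)  // NULL, CRC32 or CRC32C
        .setBytesPerChecksum(512)      // one checksum per 512 bytes of data
        .build();
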
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/proto/hdfs.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/proto/hdfs.proto?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/proto/hdfs.proto (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/proto/hdfs.proto Wed Nov  2 05:34:31 2011
@@ -23,13 +23,19 @@ option java_package = "org.apache.hadoop
 option java_outer_classname = "HdfsProtos";
 option java_generate_equals_and_hash = true;
 
+/**
+ * Extended block identifies a block
+ */
 message ExtendedBlockProto {
-  required string poolId = 1;
-  required uint64 blockId = 2;
-  required uint64 numBytes = 3;
-  required uint64 generationStamp = 4;
+  required string poolId = 1;   // Block pool id - globally unique across clusters
+  required uint64 blockId = 2;  // the local id within a pool
+  required uint64 generationStamp = 3;
+  optional uint64 numBytes = 4;  // block len does not belong in ebid - here for historical reasons
 }
 
+/**
+ * Block Token
+ */
 message BlockTokenIdentifierProto {
   required bytes identifier = 1;
   required bytes password = 2;
@@ -37,12 +43,20 @@ message BlockTokenIdentifierProto {
   required string service = 4;
 }
 
+/**
+ * Identifies a Datanode
+ */
 message DatanodeIDProto {
-  required string name = 1;
-  required string storageID = 2;
-  required uint32 infoPort = 3;
+  required string name = 1;      // hostname:portNumber
+  required string storageID = 2; // Unique storage id
+  required uint32 infoPort = 3;  // the port where the infoserver is running
+  required uint32 ipcPort = 4;   // the port where the ipc Server is running
 }
 
+
+/**
+ * The status of a Datanode
+ */
 message DatanodeInfoProto {
   required DatanodeIDProto id = 1;
   optional uint64 capacity = 2;
@@ -62,3 +76,116 @@ message DatanodeInfoProto {
   optional AdminState adminState = 10;
 }
 
+
+/**
+ * Summary of a file or directory
+ */
+message ContentSummaryProto {
+  required uint64 length = 1;
+  required uint64 fileCount = 2;
+  required uint64 directoryCount = 3;
+  required uint64 quota = 4;
+  required uint64 spaceConsumed = 5;
+  required uint64 spaceQuota = 6;
+}
+
+/**
+ * Contains a list of paths corresponding to corrupt files and a cookie
+ * used for iterative calls to NameNode.listCorruptFileBlocks.
+ *
+ */
+message CorruptFileBlocksProto {
+  repeated string files = 1;
+  required string cookie = 2;
+}
+
+/**
+ * File or Directory permission - same spec as POSIX
+ */
+message FsPermissionProto {
+  required uint32 perm = 1;       // Actually a short - only 16bits used
+}
+
+
+/**
+ * A LocatedBlock gives information about a block and its location.
+ */ 
+message LocatedBlockProto {
+  required ExtendedBlockProto b  = 1;
+  required uint64 offset = 2;           // offset of first byte of block in the file
+  repeated DatanodeInfoProto locs = 3;  // Locations ordered by proximity to client ip
+  required bool corrupt = 4;            // true if all replicas of a block are corrupt, else false
+                                        // If the block has a few corrupt replicas, they are filtered and
+                                        // their locations are not part of this object
+
+  required BlockTokenIdentifierProto blockToken = 5;
+ }
+
+
+/**
+ * A set of file blocks and their locations.
+ */
+message LocatedBlocksProto {
+  required uint64 fileLength = 1;
+  repeated LocatedBlockProto blocks = 2;
+  required bool underConstruction = 3;
+  optional LocatedBlockProto lastBlock = 4;
+  required bool isLastBlockComplete = 5;
+}
+
+
+/**
+ * Status of a file, directory or symlink
+ * Optionally includes a file's block locations if requested by client on the rpc call.
+ */
+message HdfsFileStatusProto {
+  enum FileType {
+    IS_DIR = 1;
+    IS_FILE = 2;
+    IS_SYMLINK = 3;
+  }
+  required FileType fileType = 1;
+  required bytes path = 2;          // local name of inode encoded java UTF8
+  required uint64 length = 3;
+  required FsPermissionProto permission = 4;
+  required string owner = 5;
+  required string group = 6;
+  required uint64 modification_time = 7;
+  required uint64 access_time = 8;
+  //
+  // Optional fields for symlink
+  optional bytes symlink = 9;         // if symlink, target encoded java UTF8 
+  //
+  // Optional fields for file
+  optional uint32 block_replication = 10; // Actually a short - only 16bits used
+  optional uint64 blocksize = 11;
+  optional LocatedBlocksProto locations = 12;  // supplied only if asked by client
+} 
+
+/**
+ * HDFS Server Defaults
+ */
+message FsServerDefaultsProto {
+  required uint64 blockSize = 1;
+  required uint32 bytesPerChecksum = 2;
+  required uint32 writePacketSize = 3;
+  required uint32 replication = 4; // Actually a short - only 16bits used
+  required uint32 fileBufferSize = 5;
+}
+
+
+/**
+ * Directory listing
+ */
+message DirectoryListingProto {
+  repeated HdfsFileStatusProto partialListing = 1;
+  required uint32 remainingEntries  = 2;
+}
+
+/**
+ * Status of current cluster upgrade from one version to another
+ */
+message UpgradeStatusReportProto {
+  required uint32 version = 1;
+  required uint32 upgradeStatus = 2; // Between 0 and 100 indicating the % complete
+}

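Note the field renumbering in ExtendedBlockProto: generationStamp moves from tag 4 to tag 3 and numBytes becomes optional at tag 4. A sketch of the corresponding generated-builder usage (HdfsProtos is the java_outer_classname declared above; the values are illustrative):

    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;

    ExtendedBlockProto eb = ExtendedBlockProto.newBuilder()
        .setPoolId("BP-1")            // globally unique block pool id
        .setBlockId(1073741825L)      // id local to the pool
        .setGenerationStamp(1001L)
        .setNumBytes(134217728L)      // now optional: set only when known
        .build();
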
Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/
            ('svn:externals' removed)

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Nov  2 05:34:31 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:1159757-1179483
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:1159757-1196451
 /hadoop/core/branches/branch-0.19/hdfs/src/test/hdfs:713112
 /hadoop/core/trunk/src/test/hdfs:776175-785643
 /hadoop/hdfs/branches/HDFS-1052/src/test/hdfs:987665-1095512

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java Wed Nov  2 05:34:31 2011
@@ -94,7 +94,9 @@ public class TestHDFSCLI extends CLITest
     return cmd.getExecutor(namenode).executeCommand(cmd.getCmd());
   }
 
-  @Test
+  //TODO: The test is failing due to the change in HADOOP-7360.
+  //      HDFS-2038 is going to fix it.  Disable the test for the moment.
+  //@Test
   @Override
   public void testAll () {
     super.testAll();

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java Wed Nov  2 05:34:31 2011
@@ -105,7 +105,7 @@ public class TestResolveHdfsSymlink {
    * @throws IOException
    * @throws InterruptedException
    */
-  @SuppressWarnings("unchecked")
+  @SuppressWarnings({ "unchecked", "deprecation" })
   @Test
   public void testFcDelegationToken() throws UnsupportedFileSystemException,
       IOException, InterruptedException {

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Wed Nov  2 05:34:31 2011
@@ -36,6 +36,7 @@ import java.util.Random;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.commons.math.stat.descriptive.rank.Min;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -86,6 +87,10 @@ public class MiniDFSCluster {
 
   private static final String NAMESERVICE_ID_PREFIX = "nameserviceId";
   private static final Log LOG = LogFactory.getLog(MiniDFSCluster.class);
+  /** System property to set the data dir: {@value} */
+  public static final String PROP_TEST_BUILD_DATA = "test.build.data";
+  /** Configuration option to set the data dir: {@value} */
+  public static final String HDFS_MINIDFS_BASEDIR = "hdfs.minidfs.basedir";
 
   static { DefaultMetricsSystem.setMiniClusterMode(true); }
 
@@ -495,7 +500,7 @@ public class MiniDFSCluster {
       boolean waitSafeMode, boolean setupHostsFile, boolean federation) 
   throws IOException {
     this.conf = conf;
-    base_dir = new File(getBaseDirectory());
+    base_dir = new File(determineDfsBaseDir());
     data_dir = new File(base_dir, "data");
     this.federation = federation;
     this.waitSafeMode = waitSafeMode;
@@ -504,7 +509,7 @@ public class MiniDFSCluster {
     String rpcEngineName = System.getProperty("hdfs.rpc.engine");
     if (rpcEngineName != null && !"".equals(rpcEngineName)) {
       
-      System.out.println("HDFS using RPCEngine: "+rpcEngineName);
+      LOG.info("HDFS using RPCEngine: " + rpcEngineName);
       try {
         Class<?> rpcEngine = conf.getClassByName(rpcEngineName);
         setRpcEngine(conf, NamenodeProtocols.class, rpcEngine);
@@ -858,8 +863,8 @@ public class MiniDFSCluster {
       // Set up datanode address
       setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
       if (manageDfsDirs) {
-        File dir1 = getStorageDir(i, 0);
-        File dir2 = getStorageDir(i, 1);
+        File dir1 = getInstanceStorageDir(i, 0);
+        File dir2 = getInstanceStorageDir(i, 1);
         dir1.mkdirs();
         dir2.mkdirs();
         if (!dir1.isDirectory() || !dir2.isDirectory()) { 
@@ -875,17 +880,17 @@ public class MiniDFSCluster {
         dnConf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY,
             simulatedCapacities[i-curDatanodesNum]);
       }
-      System.out.println("Starting DataNode " + i + " with "
+      LOG.info("Starting DataNode " + i + " with "
                          + DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY + ": "
                          + dnConf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
       if (hosts != null) {
         dnConf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, hosts[i - curDatanodesNum]);
-        System.out.println("Starting DataNode " + i + " with hostname set to: " 
+        LOG.info("Starting DataNode " + i + " with hostname set to: "
                            + dnConf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY));
       }
       if (racks != null) {
         String name = hosts[i - curDatanodesNum];
-        System.out.println("Adding node with hostname : " + name + " to rack "+
+        LOG.info("Adding node with hostname : " + name + " to rack " +
                             racks[i-curDatanodesNum]);
         StaticMapping.addNodeToRack(name,
                                     racks[i-curDatanodesNum]);
@@ -903,7 +908,7 @@ public class MiniDFSCluster {
       String ipAddr = dn.getSelfAddr().getAddress().getHostAddress();
       if (racks != null) {
         int port = dn.getSelfAddr().getPort();
-        System.out.println("Adding node with IP:port : " + ipAddr + ":" + port+
+        LOG.info("Adding node with IP:port : " + ipAddr + ":" + port +
                             " to rack " + racks[i-curDatanodesNum]);
         StaticMapping.addNodeToRack(ipAddr + ":" + port,
                                   racks[i-curDatanodesNum]);
@@ -1099,7 +1104,7 @@ public class MiniDFSCluster {
    * Shutdown all the nodes in the cluster.
    */
   public void shutdown() {
-    System.out.println("Shutting down the Mini HDFS Cluster");
+    LOG.info("Shutting down the Mini HDFS Cluster");
     shutdownDataNodes();
     for (NameNodeInfo nnInfo : nameNodes) {
       NameNode nameNode = nnInfo.nameNode;
@@ -1139,7 +1144,7 @@ public class MiniDFSCluster {
   public synchronized void shutdownNameNode(int nnIndex) {
     NameNode nn = nameNodes[nnIndex].nameNode;
     if (nn != null) {
-      System.out.println("Shutting down the namenode");
+      LOG.info("Shutting down the namenode");
       nn.stop();
       nn.join();
       Configuration conf = nameNodes[nnIndex].conf;
@@ -1183,9 +1188,9 @@ public class MiniDFSCluster {
     nameNodes[nnIndex] = new NameNodeInfo(nn, conf);
     if (waitActive) {
       waitClusterUp();
-      System.out.println("Restarted the namenode");
+      LOG.info("Restarted the namenode");
       waitActive();
-      System.out.println("Cluster is active");
+      LOG.info("Cluster is active");
     }
   }
 
@@ -1261,7 +1266,7 @@ public class MiniDFSCluster {
     }
     DataNodeProperties dnprop = dataNodes.remove(i);
     DataNode dn = dnprop.datanode;
-    System.out.println("MiniDFSCluster Stopping DataNode " + 
+    LOG.info("MiniDFSCluster Stopping DataNode " +
                        dn.getMachineName() +
                        " from a total of " + (dataNodes.size() + 1) + 
                        " datanodes.");
@@ -1350,7 +1355,7 @@ public class MiniDFSCluster {
     for (int i = dataNodes.size() - 1; i >= 0; i--) {
       if (!restartDataNode(i, keepPort))
         return false;
-      System.out.println("Restarted DataNode " + i);
+      LOG.info("Restarted DataNode " + i);
     }
     return true;
   }
@@ -1377,8 +1382,8 @@ public class MiniDFSCluster {
     } catch (IOException ioe) {
       // This method above should never throw.
       // It only throws IOE since it is exposed via RPC
-      throw new AssertionError("Unexpected IOE thrown: "
-          + StringUtils.stringifyException(ioe));
+      throw (AssertionError)(new AssertionError("Unexpected IOE thrown: "
+          + StringUtils.stringifyException(ioe)).initCause(ioe));
     }
     boolean isUp = false;
     synchronized (this) {
@@ -1524,7 +1529,7 @@ public class MiniDFSCluster {
           failedCount++;
           // Cached RPC connection to namenode, if any, is expected to fail once
           if (failedCount > 1) {
-            System.out.println("Tried waitActive() " + failedCount
+            LOG.warn("Tried waitActive() " + failedCount
                 + " time(s) and failed, giving up.  "
                 + StringUtils.stringifyException(e));
             throw e;
@@ -1576,7 +1581,7 @@ public class MiniDFSCluster {
   }
 
   public void formatDataNodeDirs() throws IOException {
-    base_dir = new File(getBaseDirectory());
+    base_dir = new File(determineDfsBaseDir());
     data_dir = new File(base_dir, "data");
     if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
       throw new IOException("Cannot remove data directory: " + data_dir);
@@ -1697,8 +1702,49 @@ public class MiniDFSCluster {
     return data_dir.getAbsolutePath();
   }
 
+  /**
+   * Get the base directory for this MiniDFS instance.
+   * <p/>
+   * Within the MiniDFSCluster class and any subclasses, this method should be
+   * used instead of {@link #getBaseDirectory()} which doesn't support
+   * configuration-specific base directories.
+   * <p/>
+   * First the Configuration property {@link #HDFS_MINIDFS_BASEDIR} is fetched.
+   * If non-null, this is returned.
+   * If this is null, then {@link #getBaseDirectory()} is called.
+   * @return the base directory for this instance.
+   */
+  protected String determineDfsBaseDir() {
+    String dfsdir = conf.get(HDFS_MINIDFS_BASEDIR, null);
+    if (dfsdir == null) {
+      dfsdir = getBaseDirectory();
+    }
+    return dfsdir;
+  }
+
+  /**
+   * Get the base directory for any DFS cluster whose configuration does
+   * not explicitly set it. This is done by retrieving the system property
+   * {@link #PROP_TEST_BUILD_DATA} (defaulting to "build/test/data"),
+   * and returning that directory with a subdir of /dfs.
+   * @return a directory for use as a miniDFS filesystem.
+   */
   public static String getBaseDirectory() {
-    return System.getProperty("test.build.data", "build/test/data") + "/dfs/";
+    return System.getProperty(PROP_TEST_BUILD_DATA, "build/test/data") + "/dfs/";
+  }
+
+  /**
+   * Get a storage directory for a datanode in this specific instance of
+   * a MiniCluster.
+   *
+   * @param dnIndex datanode index (starts from 0)
+   * @param dirIndex directory index (0 or 1). Index 0 provides access to the
+   *          first storage directory. Index 1 provides access to the second
+   *          storage directory.
+   * @return Storage directory
+   */
+  public File getInstanceStorageDir(int dnIndex, int dirIndex) {
+    return new File(base_dir, getStorageDirPath(dnIndex, dirIndex));
   }
 
   /**
@@ -1716,13 +1762,25 @@ public class MiniDFSCluster {
    * @return Storage directory
    */
   public static File getStorageDir(int dnIndex, int dirIndex) {
-    return new File(getBaseDirectory() + "data/data" + (2*dnIndex + 1 + dirIndex));
+    return new File(getBaseDirectory(), getStorageDirPath(dnIndex, dirIndex));
   }
-  
+
   /**
-   * Get current directory corresponding to the datanode
-   * @param storageDir
-   * @return current directory
+   * Calculate the DN instance-specific path, appended to the base dir,
+   * that locates the storage of a DN instance in the mini cluster
+   * @param dnIndex datanode index
+   * @param dirIndex directory index (0 or 1).
+   * @return the storage directory path, relative to the base directory
+   */
+  private static String getStorageDirPath(int dnIndex, int dirIndex) {
+    return "data/data" + (2 * dnIndex + 1 + dirIndex);
+  }
+
+  /**
+   * Get current directory corresponding to the datanode as defined in
+   * {@link Storage#STORAGE_DIR_CURRENT}
+   * @param storageDir the storage directory of a datanode.
+   * @return the datanode current directory
    */
   public static String getDNCurrentDir(File storageDir) {
     return storageDir + "/" + Storage.STORAGE_DIR_CURRENT + "/";
@@ -1730,8 +1788,8 @@ public class MiniDFSCluster {
   
   /**
    * Get directory corresponding to block pool directory in the datanode
-   * @param storageDir
-   * @return current directory
+   * @param storageDir the storage directory of a datanode.
+   * @return the block pool directory
    */
   public static String getBPDir(File storageDir, String bpid) {
     return getDNCurrentDir(storageDir) + bpid + "/";
@@ -1777,6 +1835,16 @@ public class MiniDFSCluster {
     return new File(getFinalizedDir(storageDir, blk.getBlockPoolId()), 
         blk.getBlockName());
   }
+
+  /**
+   * Shut down a cluster if it is not null
+   * @param cluster cluster reference or null
+   */
+  public static void shutdownCluster(MiniDFSCluster cluster) {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
   
   /**
    * Get all files related to a block from all the datanodes

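Taken together, HDFS_MINIDFS_BASEDIR, getInstanceStorageDir and the null-safe shutdownCluster let a test pin its cluster under a private directory and always clean up. A minimal sketch (the Builder-style construction is assumed from the surrounding test API, not introduced by this diff):

    Configuration conf = new HdfsConfiguration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/tmp/my-test-dfs");
    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      File storage = cluster.getInstanceStorageDir(0, 0); // datanode 0, first dir
      // ... exercise the cluster ...
    } finally {
      MiniDFSCluster.shutdownCluster(cluster); // null-safe, from this commit
    }
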
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteRangeInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteRangeInputStream.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteRangeInputStream.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteRangeInputStream.java Wed Nov  2 05:34:31 2011
@@ -35,28 +35,29 @@ import org.apache.hadoop.hdfs.ByteRangeI
 import org.junit.Test;
 
 class MockHttpURLConnection extends HttpURLConnection {
-  private int responseCode = -1;
-  URL m;
-
   public MockHttpURLConnection(URL u) {
     super(u);
-    m = u;
   }
   
+  @Override
   public boolean usingProxy(){
     return false;
   }
   
+  @Override
   public void disconnect() {
   }
   
-  public void connect() throws IOException {
+  @Override
+  public void connect() {
   }
   
+  @Override
   public InputStream getInputStream() throws IOException {
     return new ByteArrayInputStream("asdf".getBytes());
   } 
 
+  @Override
   public URL getURL() {
     URL u = null;
     try {
@@ -67,6 +68,7 @@ class MockHttpURLConnection extends Http
     return u;
   }
   
+  @Override
   public int getResponseCode() {
     if (responseCode != -1) {
       return responseCode;
@@ -82,10 +84,45 @@ class MockHttpURLConnection extends Http
   public void setResponseCode(int resCode) {
     responseCode = resCode;
   }
-
 }
 
 public class TestByteRangeInputStream {
+  @Test
+  public void testRemoveOffset() throws IOException {
+    { //no offset
+      String s = "http://test/Abc?Length=99";
+      assertEquals(s, ByteRangeInputStream.removeOffsetParam(new URL(s)).toString());
+    }
+
+    { //no parameters
+      String s = "http://test/Abc";
+      assertEquals(s, ByteRangeInputStream.removeOffsetParam(new URL(s)).toString());
+    }
+
+    { //offset as first parameter
+      String s = "http://test/Abc?offset=10&Length=99";
+      assertEquals("http://test/Abc?Length=99",
+          ByteRangeInputStream.removeOffsetParam(new URL(s)).toString());
+    }
+
+    { //offset as second parameter
+      String s = "http://test/Abc?op=read&OFFset=10&Length=99";
+      assertEquals("http://test/Abc?op=read&Length=99",
+          ByteRangeInputStream.removeOffsetParam(new URL(s)).toString());
+    }
+
+    { //offset as last parameter
+      String s = "http://test/Abc?Length=99&offset=10";
+      assertEquals("http://test/Abc?Length=99",
+          ByteRangeInputStream.removeOffsetParam(new URL(s)).toString());
+    }
+
+    { //offset as the only parameter
+      String s = "http://test/Abc?offset=10";
+      assertEquals("http://test/Abc",
+          ByteRangeInputStream.removeOffsetParam(new URL(s)).toString());
+    }
+  }
   
   @Test
   public void testByteRange() throws IOException {

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java Wed Nov  2 05:34:31 2011
@@ -83,7 +83,7 @@ public class TestCrcCorruption {
       // file disallows this Datanode from sending data to another datanode.
       // However, a client is allowed access to this block.
       //
-      File storageDir = MiniDFSCluster.getStorageDir(0, 1);
+      File storageDir = cluster.getInstanceStorageDir(0, 1);
       String bpid = cluster.getNamesystem().getBlockPoolId();
       File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
       assertTrue("data directory does not exist", data_dir.exists());
@@ -142,7 +142,7 @@ public class TestCrcCorruption {
       // Now deliberately corrupt all meta blocks from the second
       // directory of the first datanode
       //
-      storageDir = MiniDFSCluster.getStorageDir(0, 1);
+      storageDir = cluster.getInstanceStorageDir(0, 1);
       data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
       assertTrue("data directory does not exist", data_dir.exists());
       blocks = data_dir.listFiles();

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java Wed Nov  2 05:34:31 2011
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.util.StringUtils;
 
+import com.google.common.base.Charsets;
 import com.google.common.collect.Lists;
 
 /**
@@ -263,10 +264,14 @@ public class TestDFSRollback extends Tes
       UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
       baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
       for (File f : baseDirs) { 
-        UpgradeUtilities.corruptFile(new File(f,"VERSION")); 
+        UpgradeUtilities.corruptFile(
+            new File(f,"VERSION"),
+            "layoutVersion".getBytes(Charsets.UTF_8),
+            "xxxxxxxxxxxxx".getBytes(Charsets.UTF_8));
       }
       startNameNodeShouldFail(StartupOption.ROLLBACK,
           "file VERSION has layoutVersion missing");
+
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       
       log("NameNode rollback with old layout version in previous", numDirs);

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java Wed Nov  2 05:34:31 2011
@@ -39,6 +39,7 @@ import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
 
+import com.google.common.base.Charsets;
 import com.google.common.base.Joiner;
 
 import static org.junit.Assert.*;
@@ -303,7 +304,10 @@ public class TestDFSUpgrade {
       log("NameNode upgrade with corrupt version file", numDirs);
       baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
       for (File f : baseDirs) { 
-        UpgradeUtilities.corruptFile(new File (f,"VERSION")); 
+        UpgradeUtilities.corruptFile(
+            new File(f,"VERSION"),
+            "layoutVersion".getBytes(Charsets.UTF_8),
+            "xxxxxxxxxxxxx".getBytes(Charsets.UTF_8));
       }
       startNameNodeShouldFail(StartupOption.UPGRADE);
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java Wed Nov  2 05:34:31 2011
@@ -31,6 +31,7 @@ import java.util.Random;
 
 import junit.framework.TestCase;
 
+import org.apache.commons.digester.SetRootRule;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -43,6 +44,7 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
 import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
@@ -50,6 +52,7 @@ import org.apache.hadoop.hdfs.protocol.d
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.Builder;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@@ -59,6 +62,7 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 /**
  * This tests data transfer protocol handling in the Datanode. It sends
@@ -68,6 +72,9 @@ public class TestDataTransferProtocol ex
   
   private static final Log LOG = LogFactory.getLog(
                     "org.apache.hadoop.hdfs.TestDataTransferProtocol");
+
+  private static final DataChecksum DEFAULT_CHECKSUM =
+    DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512);
   
   DatanodeID datanode;
   InetSocketAddress dnAddr;
@@ -117,10 +124,8 @@ public class TestDataTransferProtocol ex
         throw eof;
       }
 
-      LOG.info("Received: " +
-          StringUtils.byteToHexString(retBuf));
-      LOG.info("Expected: " +
-          StringUtils.byteToHexString(recvBuf.toByteArray()));
+      LOG.info("Received: " +new String(retBuf));
+      LOG.info("Expected: " + StringUtils.byteToHexString(recvBuf.toByteArray()));
       
       if (eofExpected) {
         throw new IOException("Did not recieve IOException when an exception " +
@@ -129,10 +134,8 @@ public class TestDataTransferProtocol ex
       }
       
       byte[] needed = recvBuf.toByteArray();
-      for (int i=0; i<retBuf.length; i++) {
-        System.out.print(retBuf[i]);
-        assertEquals("checking byte[" + i + "]", needed[i], retBuf[i]);
-      }
+      assertEquals(StringUtils.byteToHexString(needed),
+          StringUtils.byteToHexString(retBuf));
     } finally {
       IOUtils.closeSocket(sock);
     }
@@ -153,9 +156,6 @@ public class TestDataTransferProtocol ex
   
   private void writeZeroLengthPacket(ExtendedBlock block, String description)
   throws IOException {
-    sendOut.writeByte((byte)DataChecksum.CHECKSUM_CRC32);
-    sendOut.writeInt(512);         // checksum size
-
     PacketHeader hdr = new PacketHeader(
       8,                   // size of packet
       block.getNumBytes(), // OffsetInBlock
@@ -166,18 +166,22 @@ public class TestDataTransferProtocol ex
     sendOut.writeInt(0);           // zero checksum
 
     //ok finally write a block with 0 len
-    sendResponse(Status.SUCCESS, "", recvOut);
+    sendResponse(Status.SUCCESS, "", null, recvOut);
     new PipelineAck(100, new Status[]{Status.SUCCESS}).write(recvOut);
     sendRecvData(description, false);
   }
   
   private void sendResponse(Status status, String firstBadLink,
+      String message,
       DataOutputStream out)
   throws IOException {
     Builder builder = BlockOpResponseProto.newBuilder().setStatus(status);
     if (firstBadLink != null) {
       builder.setFirstBadLink(firstBadLink);
     }
+    if (message != null) {
+      builder.setMessage(message);
+    }
     builder.build()
       .writeDelimitedTo(out);
   }
@@ -188,13 +192,14 @@ public class TestDataTransferProtocol ex
     recvBuf.reset();
     sender.writeBlock(block, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
         new DatanodeInfo[1], null, stage,
-        0, block.getNumBytes(), block.getNumBytes(), newGS);
+        0, block.getNumBytes(), block.getNumBytes(), newGS,
+        DEFAULT_CHECKSUM);
     if (eofExcepted) {
-      sendResponse(Status.ERROR, null, recvOut);
+      sendResponse(Status.ERROR, null, null, recvOut);
       sendRecvData(description, true);
     } else if (stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
       //ok finally write a block with 0 len
-      sendResponse(Status.SUCCESS, "", recvOut);
+      sendResponse(Status.SUCCESS, "", null, recvOut);
       sendRecvData(description, false);
     } else {
       writeZeroLengthPacket(block, description);
@@ -373,17 +378,18 @@ public class TestDataTransferProtocol ex
     
     /* Test OP_WRITE_BLOCK */
     sendBuf.reset();
+    
+    DataChecksum badChecksum = Mockito.spy(DEFAULT_CHECKSUM);
+    Mockito.doReturn(-1).when(badChecksum).getBytesPerChecksum();
+
     sender.writeBlock(new ExtendedBlock(poolId, newBlockId),
         BlockTokenSecretManager.DUMMY_TOKEN, "cl",
         new DatanodeInfo[1], null,
         BlockConstructionStage.PIPELINE_SETUP_CREATE,
-        0, 0L, 0L, 0L);
-    sendOut.writeByte((byte)DataChecksum.CHECKSUM_CRC32);
-    
-    // bad bytes per checksum
-    sendOut.writeInt(-1-random.nextInt(oneMil));
+        0, 0L, 0L, 0L,
+        badChecksum);
     recvBuf.reset();
-    sendResponse(Status.ERROR, null, recvOut);
+    sendResponse(Status.ERROR, null, null, recvOut);
     sendRecvData("wrong bytesPerChecksum while writing", true);
 
     sendBuf.reset();
@@ -391,9 +397,8 @@ public class TestDataTransferProtocol ex
     sender.writeBlock(new ExtendedBlock(poolId, ++newBlockId),
         BlockTokenSecretManager.DUMMY_TOKEN, "cl",
         new DatanodeInfo[1], null,
-        BlockConstructionStage.PIPELINE_SETUP_CREATE, 0, 0L, 0L, 0L);
-    sendOut.writeByte((byte)DataChecksum.CHECKSUM_CRC32);
-    sendOut.writeInt(512);
+        BlockConstructionStage.PIPELINE_SETUP_CREATE, 0, 0L, 0L, 0L,
+        DEFAULT_CHECKSUM);
 
     PacketHeader hdr = new PacketHeader(
       4,     // size of packet
@@ -403,7 +408,7 @@ public class TestDataTransferProtocol ex
       -1 - random.nextInt(oneMil)); // bad datalen
     hdr.write(sendOut);
 
-    sendResponse(Status.SUCCESS, "", recvOut);
+    sendResponse(Status.SUCCESS, "", null, recvOut);
     new PipelineAck(100, new Status[]{Status.ERROR}).write(recvOut);
     sendRecvData("negative DATA_CHUNK len while writing block " + newBlockId, 
                  true);
@@ -414,9 +419,8 @@ public class TestDataTransferProtocol ex
     sender.writeBlock(new ExtendedBlock(poolId, ++newBlockId),
         BlockTokenSecretManager.DUMMY_TOKEN, "cl",
         new DatanodeInfo[1], null,
-        BlockConstructionStage.PIPELINE_SETUP_CREATE, 0, 0L, 0L, 0L);
-    sendOut.writeByte((byte)DataChecksum.CHECKSUM_CRC32);
-    sendOut.writeInt(512);         // checksum size
+        BlockConstructionStage.PIPELINE_SETUP_CREATE, 0, 0L, 0L, 0L,
+        DEFAULT_CHECKSUM);
 
     hdr = new PacketHeader(
       8,     // size of packet
@@ -428,7 +432,7 @@ public class TestDataTransferProtocol ex
     sendOut.writeInt(0);           // zero checksum
     sendOut.flush();
     //ok finally write a block with 0 len
-    sendResponse(Status.SUCCESS, "", recvOut);
+    sendResponse(Status.SUCCESS, "", null, recvOut);
     new PipelineAck(100, new Status[]{Status.SUCCESS}).write(recvOut);
     sendRecvData("Writing a zero len block blockid " + newBlockId, false);
     
@@ -462,7 +466,15 @@ public class TestDataTransferProtocol ex
     
     // negative length is ok. Datanode assumes we want to read the whole block.
     recvBuf.reset();
-    sendResponse(Status.SUCCESS, null, recvOut);
+    
+    BlockOpResponseProto.newBuilder()
+      .setStatus(Status.SUCCESS)
+      .setReadOpChecksumInfo(ReadOpChecksumInfoProto.newBuilder()
+          .setChecksum(DataTransferProtoUtil.toProto(DEFAULT_CHECKSUM))
+          .setChunkOffset(0L))
+      .build()
+      .writeDelimitedTo(recvOut);
+    
     sendBuf.reset();
     sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
         0L, -1L-random.nextInt(oneMil));
@@ -471,7 +483,11 @@ public class TestDataTransferProtocol ex
     
     // length is more than size of block.
     recvBuf.reset();
-    sendResponse(Status.ERROR, null, recvOut);
+    sendResponse(Status.ERROR, null,
+        "opReadBlock " + firstBlock +
+        " received exception java.io.IOException:  " +
+        "Offset 0 and length 4097 don't match block " + firstBlock + " ( blockLen 4096 )",
+        recvOut);
     sendBuf.reset();
     sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
         0L, fileLen+1);

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java Wed Nov  2 05:34:31 2011
@@ -77,7 +77,7 @@ public class TestDatanodeReport extends 
                    NUM_OF_DATANODES);
 
       Thread.sleep(5000);
-      assertCounter("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
+      assertGauge("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
     }finally {
       cluster.shutdown();
     }
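
The switch from assertCounter to assertGauge tracks ExpiredHeartbeats now
being published as a point-in-time gauge rather than a monotonic counter.
Both helpers come from the MetricsAsserts test utility; a hedged sketch of
the static imports this assertion relies on:

    import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
    import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

    // getMetrics("FSNamesystem") snapshots the named metrics record;
    // assertGauge then checks the gauge value inside that snapshot.
    assertGauge("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));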

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java Wed Nov  2 05:34:31 2011
@@ -412,6 +412,25 @@ public class TestDistributedFileSystem {
     final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
         current.getShortUserName() + "x", new String[]{"user"});
     
+    try {
+      ((DistributedFileSystem) hdfs).getFileChecksum(new Path(
+          "/test/TestNonExistingFile"));
+      fail("Expecting FileNotFoundException");
+    } catch (FileNotFoundException e) {
+      assertTrue("Unexpected exception message", e.getMessage()
+          .contains("File does not exist: /test/TestNonExistingFile"));
+    }
+
+    try {
+      Path path = new Path("/test/TestExistingDir/");
+      hdfs.mkdirs(path);
+      ((DistributedFileSystem) hdfs).getFileChecksum(path);
+      fail("Expecting FileNotFoundException");
+    } catch (FileNotFoundException e) {
+      assertTrue("Unexpected exception message", e.getMessage()
+          .contains("File does not exist: /test/TestExistingDir"));
+    }
+    
     //hftp
     final String hftpuri = "hftp://" + nnAddr;
     System.out.println("hftpuri=" + hftpuri);
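
Both new cases pin down the same contract: getFileChecksum is defined only
for regular files, so a missing path or a directory surfaces as
FileNotFoundException. A caller wanting to avoid the exception for
directories could check the file status first; a hypothetical client-side
sketch (checksumOrNull is not part of this patch, and the check is racy by
nature):

    import java.io.IOException;
    import org.apache.hadoop.fs.FileChecksum;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    static FileChecksum checksumOrNull(FileSystem fs, Path p) throws IOException {
      // getFileStatus still throws FileNotFoundException if p is absent.
      FileStatus st = fs.getFileStatus(p);
      return st.isFile() ? fs.getFileChecksum(p) : null; // directories: none
    }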

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java Wed Nov  2 05:34:31 2011
@@ -65,7 +65,7 @@ public class TestFileCorruption extends 
       FileSystem fs = cluster.getFileSystem();
       util.createFiles(fs, "/srcdat");
       // Now deliberately remove the blocks
-      File storageDir = MiniDFSCluster.getStorageDir(2, 0);
+      File storageDir = cluster.getInstanceStorageDir(2, 0);
       String bpid = cluster.getNamesystem().getBlockPoolId();
       File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
       assertTrue("data directory does not exist", data_dir.exists());
@@ -127,11 +127,11 @@ public class TestFileCorruption extends 
       
       // get the block
       final String bpid = cluster.getNamesystem().getBlockPoolId();
-      File storageDir = MiniDFSCluster.getStorageDir(0, 0);
+      File storageDir = cluster.getInstanceStorageDir(0, 0);
       File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
       ExtendedBlock blk = getBlock(bpid, dataDir);
       if (blk == null) {
-        storageDir = MiniDFSCluster.getStorageDir(0, 1);
+        storageDir = cluster.getInstanceStorageDir(0, 1);
         dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
         blk = getBlock(bpid, dataDir);
       }
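
This and several test files below make the same substitution: the static
MiniDFSCluster.getStorageDir(dnIndex, dirIndex) becomes the instance method
cluster.getInstanceStorageDir(dnIndex, dirIndex). A hedged reading of the
difference: the static form resolves against a class-wide base path, while
the instance form resolves against the base directory owned by that
particular cluster, so tests that run more than one MiniDFSCluster look in
the right place. In sketch form:

    // Static: a fixed base path shared by every cluster in the JVM.
    File viaClass = MiniDFSCluster.getStorageDir(2, 0);

    // Instance: the base path belonging to this specific cluster.
    File viaInstance = cluster.getInstanceStorageDir(2, 0);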

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java Wed Nov  2 05:34:31 2011
@@ -158,6 +158,24 @@ public class TestSetTimes extends TestCa
       assertTrue(atime2 == stat.getAccessTime());
       assertTrue(mtime2 == mtime3);
 
+      long mtime4 = System.currentTimeMillis() - (3600L * 1000L);
+      long atime4 = System.currentTimeMillis();
+      fileSys.setTimes(dir1, mtime4, atime4);
+      // check the new modification and access times on the directory
+      stat = fileSys.getFileStatus(dir1);
+      assertTrue("Not matching the modification times", mtime4 == stat
+          .getModificationTime());
+      assertTrue("Not matching the access times", atime4 == stat
+          .getAccessTime());
+
+      Path nonExistingDir = new Path(dir1, "/nonExistingDir/");
+      try {
+        fileSys.setTimes(nonExistingDir, mtime4, atime4);
+        fail("Expecting FileNotFoundException");
+      } catch (FileNotFoundException e) {
+        assertTrue(e.getMessage().contains(
+            "File/Directory " + nonExistingDir.toString() + " does not exist."));
+      }
       // shutdown cluster and restart
       cluster.shutdown();
       try {Thread.sleep(2*MAX_IDLE_TIME);} catch (InterruptedException e) {}
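
The new assertions exercise setTimes() on a directory, not just on a file.
Worth recalling the FileSystem#setTimes convention the surrounding test
leans on: a timestamp argument of -1 leaves that field unchanged, so either
time can be updated independently. For example:

    // Update only the modification time; leave atime as it was.
    fileSys.setTimes(dir1, mtime4, -1);

    // Update only the access time; leave mtime as it was.
    fileSys.setTimes(dir1, -1, atime4);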

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java Wed Nov  2 05:34:31 2011
@@ -24,10 +24,8 @@ import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.OutputStream;
-import java.io.RandomAccessFile;
 import java.net.URI;
 import java.util.Arrays;
-import java.util.Random;
 import java.util.Collections;
 import java.util.zip.CRC32;
 import org.apache.hadoop.conf.Configuration;
@@ -53,6 +51,10 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 
+import com.google.common.base.Preconditions;
+import com.google.common.io.Files;
+import com.google.common.primitives.Bytes;
+
 /**
  * This class defines a number of static helper methods used by the
  * DFS Upgrade unit tests.  By default, a singleton master populated storage
@@ -483,20 +485,26 @@ public class UpgradeUtilities {
    * @throws IllegalArgumentException if the given file is not a file
    * @throws IOException if an IOException occurs while reading or writing the file
    */
-  public static void corruptFile(File file) throws IOException {
+  public static void corruptFile(File file,
+      byte[] stringToCorrupt,
+      byte[] replacement) throws IOException {
+    Preconditions.checkArgument(replacement.length == stringToCorrupt.length);
     if (!file.isFile()) {
       throw new IllegalArgumentException(
-                                         "Given argument is not a file:" + file);
+          "Given argument is not a file: " + file);
     }
-    RandomAccessFile raf = new RandomAccessFile(file,"rws");
-    Random random = new Random();
-    for (long i = 0; i < raf.length(); i++) {
-      raf.seek(i);
-      if (random.nextBoolean()) {
-        raf.writeByte(random.nextInt());
-      }
+    byte[] data = Files.toByteArray(file);
+    int index = Bytes.indexOf(data, stringToCorrupt);
+    if (index == -1) {
+      throw new IOException(
+          "File " + file + " does not contain string " +
+          new String(stringToCorrupt));
+    }
+
+    for (int i = 0; i < stringToCorrupt.length; i++) {
+      data[index + i] = replacement[i];
     }
-    raf.close();
+    Files.write(data, file);
   }
   
   /**
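
corruptFile() now corrupts deterministically: it finds a caller-supplied
byte string in the file and overwrites it with a same-length replacement,
instead of flipping random bytes, so a test can target exactly the field it
cares about and do so reproducibly. A hypothetical call site (the file path
and marker string below are illustrative, not taken from this patch; note
the replacement must match the marker's length):

    import java.io.File;
    import com.google.common.base.Charsets;

    File version = new File(storageDir, "current/VERSION"); // assumed path
    UpgradeUtilities.corruptFile(version,
        "layoutVersion".getBytes(Charsets.UTF_8),           // assumed marker
        "xxxxxxxxxxxxx".getBytes(Charsets.UTF_8));          // same length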

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java Wed Nov  2 05:34:31 2011
@@ -148,7 +148,7 @@ public class TestDelegationToken {
   @Test
   public void testDelegationTokenDFSApi() throws Exception {
     DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
-    Token<DelegationTokenIdentifier> token = dfs.getDelegationToken("JobTracker");
+    final Token<DelegationTokenIdentifier> token = dfs.getDelegationToken("JobTracker");
     DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
     byte[] tokenId = token.getIdentifier();
     identifier.readFields(new DataInputStream(
@@ -156,6 +156,15 @@ public class TestDelegationToken {
     LOG.info("A valid token should have non-null password, and should be renewed successfully");
     Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
     dtSecretManager.renewToken(token, "JobTracker");
+    UserGroupInformation.createRemoteUser("JobTracker").doAs(
+        new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            token.renew(config);
+            token.cancel(config);
+            return null;
+          }
+        });
   }
   
   @Test
@@ -174,15 +183,26 @@ public class TestDelegationToken {
       }
     });
 
-    final Token<DelegationTokenIdentifier> token = webhdfs.getDelegationToken("JobTracker");
+    final Token<DelegationTokenIdentifier> token = webhdfs
+        .getDelegationToken("JobTracker");
     DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
     byte[] tokenId = token.getIdentifier();
-    identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
+    identifier
+        .readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
     LOG.info("A valid token should have non-null password, and should be renewed successfully");
     Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
     dtSecretManager.renewToken(token, "JobTracker");
+    ugi.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        token.renew(config);
+        token.cancel(config);
+        return null;
+      }
+    });
   }
 
+  @SuppressWarnings("deprecation")
   @Test
   public void testDelegationTokenWithDoAs() throws Exception {
     final DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
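
The new doAs blocks in both tests drive Token#renew(Configuration) and
Token#cancel(Configuration), which dispatch to the TokenRenewer registered
for the token's kind. Running them as the "JobTracker" user matters because
that is the renewer named when the token was fetched. renew() also returns
the token's new expiration time, which a caller could assert on; a hedged
sketch reusing the names from the test above:

    // Renew as the designated renewer and inspect the new expiry.
    long newExpiry = UserGroupInformation.createRemoteUser("JobTracker")
        .doAs(new PrivilegedExceptionAction<Long>() {
          @Override
          public Long run() throws Exception {
            return token.renew(config); // fails unless caller == renewer
          }
        });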

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java Wed Nov  2 05:34:31 2011
@@ -51,12 +51,12 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.io.TestWritable;
 import org.apache.hadoop.ipc.Client;
 import org.apache.hadoop.ipc.ProtocolSignature;
@@ -96,9 +96,9 @@ public class TestBlockToken {
     ((Log4JLogger) SaslRpcServer.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) SaslInputStream.LOG).getLogger().setLevel(Level.ALL);
   }
-  
+
   /** Directory where we can count our open file descriptors under Linux */
-  static File FD_DIR = new File("/proc/self/fd/");  
+  static File FD_DIR = new File("/proc/self/fd/");
 
   long blockKeyUpdateInterval = 10 * 60 * 1000; // 10 mins
   long blockTokenLifetime = 2 * 60 * 1000; // 2 mins
@@ -120,7 +120,8 @@ public class TestBlockToken {
     public Long answer(InvocationOnMock invocation) throws IOException {
       Object args[] = invocation.getArguments();
       assertEquals(1, args.length);
-      ExtendedBlock block = (ExtendedBlock) args[0];
+      org.apache.hadoop.hdfs.protocolR23Compatible.ExtendedBlockWritable block = 
+          (org.apache.hadoop.hdfs.protocolR23Compatible.ExtendedBlockWritable) args[0];
       Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
           .getTokenIdentifiers();
       assertEquals("Only one BlockTokenIdentifier expected", 1, tokenIds.size());
@@ -129,7 +130,9 @@ public class TestBlockToken {
         BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
         LOG.info("Got: " + id.toString());
         assertTrue("Received BlockTokenIdentifier is wrong", ident.equals(id));
-        sm.checkAccess(id, null, block, BlockTokenSecretManager.AccessMode.WRITE);
+        sm.checkAccess(id, null, org.apache.hadoop.hdfs.protocolR23Compatible.
+            ExtendedBlockWritable.convertExtendedBlock(block),
+            BlockTokenSecretManager.AccessMode.WRITE);
         result = id.getBlockId();
       }
       return result;
@@ -137,7 +140,8 @@ public class TestBlockToken {
   }
 
   private BlockTokenIdentifier generateTokenId(BlockTokenSecretManager sm,
-      ExtendedBlock block, EnumSet<BlockTokenSecretManager.AccessMode> accessModes)
+      ExtendedBlock block,
+      EnumSet<BlockTokenSecretManager.AccessMode> accessModes)
       throws IOException {
     Token<BlockTokenIdentifier> token = sm.generateToken(block, accessModes);
     BlockTokenIdentifier id = sm.createIdentifier();
@@ -151,12 +155,12 @@ public class TestBlockToken {
     TestWritable.testWritable(new BlockTokenIdentifier());
     BlockTokenSecretManager sm = new BlockTokenSecretManager(true,
         blockKeyUpdateInterval, blockTokenLifetime);
-    TestWritable.testWritable(generateTokenId(sm, block1, EnumSet
-        .allOf(BlockTokenSecretManager.AccessMode.class)));
-    TestWritable.testWritable(generateTokenId(sm, block2, EnumSet
-        .of(BlockTokenSecretManager.AccessMode.WRITE)));
-    TestWritable.testWritable(generateTokenId(sm, block3, EnumSet
-        .noneOf(BlockTokenSecretManager.AccessMode.class)));
+    TestWritable.testWritable(generateTokenId(sm, block1,
+        EnumSet.allOf(BlockTokenSecretManager.AccessMode.class)));
+    TestWritable.testWritable(generateTokenId(sm, block2,
+        EnumSet.of(BlockTokenSecretManager.AccessMode.WRITE)));
+    TestWritable.testWritable(generateTokenId(sm, block3,
+        EnumSet.noneOf(BlockTokenSecretManager.AccessMode.class)));
   }
 
   private void tokenGenerationAndVerification(BlockTokenSecretManager master,
@@ -176,8 +180,8 @@ public class TestBlockToken {
       slave.checkAccess(token2, null, block2, mode);
     }
     // multi-mode tokens
-    Token<BlockTokenIdentifier> mtoken = master.generateToken(block3, EnumSet
-        .allOf(BlockTokenSecretManager.AccessMode.class));
+    Token<BlockTokenIdentifier> mtoken = master.generateToken(block3,
+        EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));
     for (BlockTokenSecretManager.AccessMode mode : BlockTokenSecretManager.AccessMode
         .values()) {
       master.checkAccess(mtoken, null, block3, mode);
@@ -202,25 +206,28 @@ public class TestBlockToken {
     slaveHandler.setKeys(keys);
     tokenGenerationAndVerification(masterHandler, slaveHandler);
   }
-  
+
   private Server createMockDatanode(BlockTokenSecretManager sm,
       Token<BlockTokenIdentifier> token) throws IOException {
-    ClientDatanodeProtocol mockDN = mock(ClientDatanodeProtocol.class);
+    org.apache.hadoop.hdfs.protocolR23Compatible.ClientDatanodeWireProtocol mockDN =
+        mock(org.apache.hadoop.hdfs.protocolR23Compatible.ClientDatanodeWireProtocol.class);
     when(mockDN.getProtocolVersion(anyString(), anyLong())).thenReturn(
-        ClientDatanodeProtocol.versionID);
-    doReturn(ProtocolSignature.getProtocolSignature(
-        mockDN, ClientDatanodeProtocol.class.getName(),
-        ClientDatanodeProtocol.versionID, 0))
-      .when(mockDN).getProtocolSignature(anyString(), anyLong(), anyInt());
+        org.apache.hadoop.hdfs.protocolR23Compatible.ClientDatanodeWireProtocol.versionID);
+    doReturn(
+        ProtocolSignature.getProtocolSignature(mockDN,
+            org.apache.hadoop.hdfs.protocolR23Compatible.ClientDatanodeWireProtocol.class.getName(),
+            org.apache.hadoop.hdfs.protocolR23Compatible.ClientDatanodeWireProtocol.versionID, 0)).when(mockDN)
+        .getProtocolSignature(anyString(), anyLong(), anyInt());
 
     BlockTokenIdentifier id = sm.createIdentifier();
     id.readFields(new DataInputStream(new ByteArrayInputStream(token
         .getIdentifier())));
     doAnswer(new getLengthAnswer(sm, id)).when(mockDN).getReplicaVisibleLength(
-        any(ExtendedBlock.class));
+        any(org.apache.hadoop.hdfs.protocolR23Compatible.ExtendedBlockWritable.class));
 
-    return RPC.getServer(ClientDatanodeProtocol.class, mockDN,
-        ADDRESS, 0, 5, true, conf, sm);
+    return RPC.getServer(org.apache.hadoop.hdfs.protocolR23Compatible.ClientDatanodeWireProtocol.class, 
+        mockDN, ADDRESS, 0, 5,
+        true, conf, sm);
   }
 
   @Test
@@ -241,9 +248,8 @@ public class TestBlockToken {
 
     ClientDatanodeProtocol proxy = null;
     try {
-      proxy = RPC.getProxy(
-          ClientDatanodeProtocol.class, ClientDatanodeProtocol.versionID, addr,
-          ticket, conf, NetUtils.getDefaultSocketFactory(conf));
+      proxy = DFSUtil.createClientDatanodeProtocolProxy(addr, ticket, conf,
+          NetUtils.getDefaultSocketFactory(conf));
       assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
     } finally {
       server.stop();
@@ -255,8 +261,8 @@ public class TestBlockToken {
 
   /**
    * Test that fast repeated invocations of createClientDatanodeProtocolProxy
-   * will not end up using up thousands of sockets. This is a regression test for
-   * HDFS-1965.
+   * will not end up using up thousands of sockets. This is a regression test
+   * for HDFS-1965.
    */
   @Test
   public void testBlockTokenRpcLeak() throws Exception {
@@ -270,9 +276,9 @@ public class TestBlockToken {
     server.start();
 
     final InetSocketAddress addr = NetUtils.getConnectAddress(server);
-    DatanodeID fakeDnId = new DatanodeID(
-        "localhost:" + addr.getPort(), "fake-storage", 0, addr.getPort());
-    
+    DatanodeID fakeDnId = new DatanodeID("localhost:" + addr.getPort(),
+        "fake-storage", 0, addr.getPort());
+
     ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
     LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
     fakeBlock.setBlockToken(token);
@@ -282,19 +288,19 @@ public class TestBlockToken {
     // RPC "Client" object to stay above 0 such that RPC.stopProxy doesn't
     // actually close the TCP connections to the real target DN.
     ClientDatanodeProtocol proxyToNoWhere = RPC.getProxy(
-        ClientDatanodeProtocol.class, ClientDatanodeProtocol.versionID, 
+        ClientDatanodeProtocol.class, ClientDatanodeProtocol.versionID,
         new InetSocketAddress("1.1.1.1", 1),
-        UserGroupInformation.createRemoteUser("junk"),
-        conf, NetUtils.getDefaultSocketFactory(conf));
-    
+        UserGroupInformation.createRemoteUser("junk"), conf,
+        NetUtils.getDefaultSocketFactory(conf));
+
     ClientDatanodeProtocol proxy = null;
 
     int fdsAtStart = countOpenFileDescriptors();
     try {
       long endTime = System.currentTimeMillis() + 3000;
       while (System.currentTimeMillis() < endTime) {
-        proxy = DFSUtil.createClientDatanodeProtocolProxy(
-            fakeDnId, conf, 1000, fakeBlock);
+        proxy = DFSUtil.createClientDatanodeProtocolProxy(fakeDnId, conf, 1000,
+            fakeBlock);
         assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
         if (proxy != null) {
           RPC.stopProxy(proxy);
@@ -303,32 +309,31 @@ public class TestBlockToken {
       }
 
       int fdsAtEnd = countOpenFileDescriptors();
-      
+
       if (fdsAtEnd - fdsAtStart > 50) {
         fail("Leaked " + (fdsAtEnd - fdsAtStart) + " fds!");
       }
     } finally {
       server.stop();
     }
-    
+
     RPC.stopProxy(proxyToNoWhere);
   }
 
   /**
-   * @return the current number of file descriptors open by this
-   * process.
+   * @return the current number of file descriptors open by this process.
    */
   private static int countOpenFileDescriptors() throws IOException {
     return FD_DIR.list().length;
   }
 
-  /** 
+  /**
    * Test {@link BlockPoolTokenSecretManager}
    */
   @Test
   public void testBlockPoolTokenSecretManager() throws Exception {
     BlockPoolTokenSecretManager bpMgr = new BlockPoolTokenSecretManager();
-    
+
     // Test BlockPoolTokenSecretManager with up to 10 block pools
     for (int i = 0; i < 10; i++) {
       String bpid = Integer.toString(i);
@@ -337,12 +342,11 @@ public class TestBlockToken {
       BlockTokenSecretManager slaveHandler = new BlockTokenSecretManager(false,
           blockKeyUpdateInterval, blockTokenLifetime);
       bpMgr.addBlockPool(bpid, slaveHandler);
-      
-      
+
       ExportedBlockKeys keys = masterHandler.exportKeys();
       bpMgr.setKeys(bpid, keys);
       tokenGenerationAndVerification(masterHandler, bpMgr.get(bpid));
-      
+
       // Test key updating
       masterHandler.updateKeys();
       tokenGenerationAndVerification(masterHandler, bpMgr.get(bpid));
@@ -351,11 +355,12 @@ public class TestBlockToken {
       tokenGenerationAndVerification(masterHandler, bpMgr.get(bpid));
     }
   }
-  
+
   /**
-   * This test writes a file and gets the block locations without closing
-   * the file, and tests the block token in the last block. Block token is
-   * verified by ensuring it is of correct kind.
+   * This test writes a file and gets the block locations without closing the
+   * file, and tests the block token in the last block. The block token is
+   * verified by ensuring it is of the correct kind.
+   * 
    * @throws IOException
    * @throws InterruptedException
    */
@@ -389,5 +394,5 @@ public class TestBlockToken {
     } finally {
       cluster.shutdown();
     }
-  } 
+  }
 }
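
The thrust of these hunks: the mock datanode now serves the on-the-wire
ClientDatanodeWireProtocol from the protocolR23Compatible layer, while
callers keep programming against ClientDatanodeProtocol through the
translating proxy from DFSUtil.createClientDatanodeProtocolProxy. Arguments
therefore arrive in wire form and must be converted back, as the
getLengthAnswer callback above does. A minimal sketch of that conversion
step, using the converter visible in the hunks:

    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
    import org.apache.hadoop.hdfs.protocolR23Compatible.ExtendedBlockWritable;

    class WireConversion {
      // Static converter used by the mock's getLengthAnswer above.
      static ExtendedBlock fromWire(ExtendedBlockWritable wire) {
        return ExtendedBlockWritable.convertExtendedBlock(wire);
      }
    }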

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java Wed Nov  2 05:34:31 2011
@@ -80,7 +80,7 @@ public class TestBlockManager {
         "need to set a dummy value here so it assumes a multi-rack cluster");
     fsn = Mockito.mock(FSNamesystem.class);
     Mockito.doReturn(true).when(fsn).hasWriteLock();
-    bm = new BlockManager(fsn, conf);
+    bm = new BlockManager(fsn, fsn, conf);
   }
   
   private void addNodes(Iterable<DatanodeDescriptor> nodesToAdd) {
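
A hedged reading of the extra argument: BlockManager's constructor was
split to take the namesystem under two roles, roughly

    // Assumed shape of the new signature; parameter names are guesses:
    //   BlockManager(Namesystem ns, FSClusterStats stats, Configuration conf)
    bm = new BlockManager(fsn, fsn, conf);

and since FSNamesystem satisfies both parameters, the test passes the same
mock twice.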

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java Wed Nov  2 05:34:31 2011
@@ -63,7 +63,7 @@ public class TestOverReplicatedBlocks ex
       DataNodeProperties dnProps = cluster.stopDataNode(0);
       // remove block scanner log to trigger block scanning
       File scanLog = new File(MiniDFSCluster.getFinalizedDir(
-          MiniDFSCluster.getStorageDir(0, 0),
+          cluster.getInstanceStorageDir(0, 0),
           cluster.getNamesystem().getBlockPoolId()).getParent().toString()
           + "/../dncp_block_verification.log.prev");
      // wait for one minute for deletion to succeed

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java Wed Nov  2 05:34:31 2011
@@ -324,7 +324,7 @@ public class TestDataNodeVolumeFailure {
     final String bpid = cluster.getNamesystem().getBlockPoolId();
     for(int i=0; i<dn_num; i++) {
       for(int j=0; j<=1; j++) {
-        File storageDir = MiniDFSCluster.getStorageDir(i, j);
+        File storageDir = cluster.getInstanceStorageDir(i, j);
         File dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
         if(dir == null) {
           System.out.println("dir is null for dn=" + i + " and data_dir=" + j);

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java Wed Nov  2 05:34:31 2011
@@ -234,8 +234,8 @@ public class TestDataNodeVolumeFailureTo
     // Fail the current directory since invalid storage directory perms
     // get fixed up automatically on datanode startup.
     File[] dirs = {
-        new File(MiniDFSCluster.getStorageDir(dnIndex, 0), "current"),
-        new File(MiniDFSCluster.getStorageDir(dnIndex, 1), "current") };
+        new File(cluster.getInstanceStorageDir(dnIndex, 0), "current"),
+        new File(cluster.getInstanceStorageDir(dnIndex, 1), "current") };
 
     try {
       for (int i = 0; i < volumesFailed; i++) {
@@ -274,7 +274,7 @@ public class TestDataNodeVolumeFailureTo
     final DatanodeManager dm = cluster.getNamesystem().getBlockManager(
     ).getDatanodeManager();
     long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
-    File dir = new File(MiniDFSCluster.getStorageDir(0, 0), "current");
+    File dir = new File(cluster.getInstanceStorageDir(0, 0), "current");
 
     try {
       prepareDirToFail(dir);

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java?rev=1196458&r1=1196457&r2=1196458&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java Wed Nov  2 05:34:31 2011
@@ -64,10 +64,10 @@ public class TestDeleteBlockPool {
       String bpid1 = cluster.getNamesystem(0).getBlockPoolId();
       String bpid2 = cluster.getNamesystem(1).getBlockPoolId();
 
-      File dn1StorageDir1 = MiniDFSCluster.getStorageDir(0, 0);
-      File dn1StorageDir2 = MiniDFSCluster.getStorageDir(0, 1);
-      File dn2StorageDir1 = MiniDFSCluster.getStorageDir(1, 0);
-      File dn2StorageDir2 = MiniDFSCluster.getStorageDir(1, 1);
+      File dn1StorageDir1 = cluster.getInstanceStorageDir(0, 0);
+      File dn1StorageDir2 = cluster.getInstanceStorageDir(0, 1);
+      File dn2StorageDir1 = cluster.getInstanceStorageDir(1, 0);
+      File dn2StorageDir2 = cluster.getInstanceStorageDir(1, 1);
 
       // Although namenode is shutdown, the bp offerservice is still running
       try {
@@ -171,8 +171,8 @@ public class TestDeleteBlockPool {
       String bpid1 = cluster.getNamesystem(0).getBlockPoolId();
       String bpid2 = cluster.getNamesystem(1).getBlockPoolId();
       
-      File dn1StorageDir1 = MiniDFSCluster.getStorageDir(0, 0);
-      File dn1StorageDir2 = MiniDFSCluster.getStorageDir(0, 1);
+      File dn1StorageDir1 = cluster.getInstanceStorageDir(0, 0);
+      File dn1StorageDir2 = cluster.getInstanceStorageDir(0, 1);
       
       Configuration nn1Conf = cluster.getConfiguration(0);
       nn1Conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "namesServerId1");