Posted to hdfs-commits@hadoop.apache.org by to...@apache.org on 2014/07/23 03:47:36 UTC

svn commit: r1612742 [3/4] - in /hadoop/common/branches/MR-2841/hadoop-hdfs-project: hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/ hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/ hadoop-hdfs/ hadoop-hdfs/src/contrib/bkjo...

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java Wed Jul 23 01:47:28 2014
@@ -122,6 +122,11 @@ public class DFSZKFailoverController ext
           "HA is not enabled for this namenode.");
     }
     String nnId = HAUtil.getNameNodeId(localNNConf, nsId);
+    if (nnId == null) {
+      String msg = "Could not get the namenode ID of this node. " +
+          "You may be running zkfc on a node other than the namenode.";
+      throw new HadoopIllegalArgumentException(msg);
+    }
     NameNode.initializeGenericKeys(localNNConf, nsId, nnId);
     DFSUtil.setGenericConf(localNNConf, nsId, nnId, ZKFC_CONF_KEYS);
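
For context on the added check: HAUtil.getNameNodeId() matches the local machine's addresses against
the namenode RPC addresses configured for the nameservice, so it returns null when zkfc is started on
a host that is not one of the configured namenodes. A minimal, hedged sketch of the configuration
involved (the nameservice ID and hostnames are assumed example values, not taken from this commit):

    import org.apache.hadoop.conf.Configuration;

    public class ZkfcNameNodeIdExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Assumed example values for an HA-enabled nameservice.
        conf.set("dfs.nameservices", "mycluster");
        conf.set("dfs.ha.namenodes.mycluster", "nn1,nn2");
        conf.set("dfs.namenode.rpc-address.mycluster.nn1", "nn-host-1:8020");
        conf.set("dfs.namenode.rpc-address.mycluster.nn2", "nn-host-2:8020");
        // HAUtil.getNameNodeId(conf, "mycluster") resolves the local addresses against the
        // rpc-address entries above; on any other host it yields null, which the check added
        // above now reports as a HadoopIllegalArgumentException instead of failing later.
        System.out.println(conf.get("dfs.ha.namenodes.mycluster"));
      }
    }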
     

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java Wed Jul 23 01:47:28 2014
@@ -77,7 +77,7 @@ public class DFSck extends Configured im
   private static final String USAGE = "Usage: DFSck <path> "
       + "[-list-corruptfileblocks | "
       + "[-move | -delete | -openforwrite] "
-      + "[-files [-blocks [-locations | -racks]]]]\n"
+      + "[-files [-blocks [-locations | -racks]]]] [-showprogress]\n"
       + "\t<path>\tstart checking from this path\n"
       + "\t-move\tmove corrupted files to /lost+found\n"
       + "\t-delete\tdelete corrupted files\n"
@@ -90,7 +90,8 @@ public class DFSck extends Configured im
       + "blocks and files they belong to\n"
       + "\t-blocks\tprint out block report\n"
       + "\t-locations\tprint out locations for every block\n"
-      + "\t-racks\tprint out network topology for data-node locations\n\n"
+      + "\t-racks\tprint out network topology for data-node locations\n"
+      + "\t-showprogress\tshow progress in output. Default is OFF (no progress)\n\n"
       + "Please Note:\n"
       + "\t1. By default fsck ignores files opened for write, "
       + "use -openforwrite to report such files. They are usually "
@@ -270,6 +271,7 @@ public class DFSck extends Configured im
       else if (args[idx].equals("-blocks")) { url.append("&blocks=1"); }
       else if (args[idx].equals("-locations")) { url.append("&locations=1"); }
       else if (args[idx].equals("-racks")) { url.append("&racks=1"); }
+      else if (args[idx].equals("-showprogress")) { url.append("&showprogress=1"); }
       else if (args[idx].equals("-list-corruptfileblocks")) {
         url.append("&listcorruptfileblocks=1");
         doListCorruptFileBlocks = true;
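
As the hunk above shows, the new -showprogress option (for example "hdfs fsck /data -files -blocks -showprogress")
is simply forwarded to the NameNode's fsck servlet as one more query parameter. A hedged sketch of the
request URL DFSck ends up building; the host, port and the other parameters are assumed example values
rather than anything taken from this commit:

    public class FsckUrlExample {
      public static void main(String[] args) {
        // Hypothetical base URL; 50070 was the default NameNode HTTP port at the time.
        StringBuilder url = new StringBuilder("http://nn-host:50070/fsck?ugi=hdfs&path=%2Fdata");
        url.append("&files=1").append("&blocks=1");
        url.append("&showprogress=1"); // added by the hunk above; omitting it keeps the old behaviour
        System.out.println(url);
      }
    }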

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Wed Jul 23 01:47:28 2014
@@ -158,7 +158,7 @@ public class WebHdfsFileSystem extends F
     // getCanonicalUri() in order to handle the case where no port is
     // specified in the URI
     this.tokenServiceName = isLogicalUri ?
-        HAUtil.buildTokenServiceForLogicalUri(uri)
+        HAUtil.buildTokenServiceForLogicalUri(uri, getScheme())
         : SecurityUtil.buildTokenService(getCanonicalUri());
 
     if (!isHA) {
@@ -448,6 +448,7 @@ public class WebHdfsFileSystem extends F
 
     protected final HttpOpParam.Op op;
     private final boolean redirected;
+    protected ExcludeDatanodesParam excludeDatanodes = new ExcludeDatanodesParam("");
 
     private boolean checkRetry;
 
@@ -499,6 +500,10 @@ public class WebHdfsFileSystem extends F
      * a DN such as open and checksum
      */
     private HttpURLConnection connect(URL url) throws IOException {
+      //redirect hostname and port
+      String redirectHost = null;
+
+      
       // resolve redirects for a DN operation unless already resolved
       if (op.getRedirect() && !redirected) {
         final HttpOpParam.Op redirectOp =
@@ -511,11 +516,24 @@ public class WebHdfsFileSystem extends F
         try {
           validateResponse(redirectOp, conn, false);
           url = new URL(conn.getHeaderField("Location"));
+          redirectHost = url.getHost() + ":" + url.getPort();
         } finally {
           conn.disconnect();
         }
       }
-      return connect(op, url);
+      try {
+        return connect(op, url);
+      } catch (IOException ioe) {
+        if (redirectHost != null) {
+          if (excludeDatanodes.getValue() != null) {
+            excludeDatanodes = new ExcludeDatanodesParam(redirectHost + ","
+                + excludeDatanodes.getValue());
+          } else {
+            excludeDatanodes = new ExcludeDatanodesParam(redirectHost);
+          }
+        }
+        throw ioe;
+      }      
     }
 
     private HttpURLConnection connect(final HttpOpParam.Op op, final URL url)
@@ -652,7 +670,14 @@ public class WebHdfsFileSystem extends F
     
     @Override
     protected URL getUrl() throws IOException {
-      return toUrl(op, fspath, parameters);
+      if (excludeDatanodes.getValue() != null) {
+        Param<?, ?>[] tmpParam = new Param<?, ?>[parameters.length + 1];
+        System.arraycopy(parameters, 0, tmpParam, 0, parameters.length);
+        tmpParam[parameters.length] = excludeDatanodes;
+        return toUrl(op, fspath, tmpParam);
+      } else {
+        return toUrl(op, fspath, parameters);
+      }
     }
   }
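
The excludeDatanodes bookkeeping above ties into the existing WebHDFS retry handling: when the
connection to the datanode that a redirect pointed at fails, its host:port is recorded, and getUrl()
then appends it as an exclude list so the retried request is redirected to a different replica. A
simplified, hedged sketch of the same idea; the class and helper names are invented for illustration
and this is not the actual WebHdfsFileSystem code:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    class ExcludeOnFailureSketch {
      private final List<String> excluded = new ArrayList<String>();

      String read(String path) throws IOException {
        IOException last = null;
        for (int attempt = 0; attempt < 3; attempt++) {     // assumed retry budget
          String datanode = chooseDatanode(path, excluded); // redirect target, minus excluded hosts
          try {
            return readFrom(datanode, path);
          } catch (IOException e) {
            excluded.add(datanode); // same idea as the excludeDatanodes parameter above
            last = e;
          }
        }
        throw last;
      }

      // Stand-ins for the NameNode redirect and the datanode read; purely illustrative.
      private String chooseDatanode(String path, List<String> skip) { return "dn1.example.com:50075"; }
      private String readFrom(String dn, String path) throws IOException { return "data"; }
    }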
 

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto Wed Jul 23 01:47:28 2014
@@ -113,6 +113,7 @@ message BlockCommandProto {
   repeated BlockProto blocks = 3;
   repeated DatanodeInfosProto targets = 4;
   repeated StorageUuidsProto targetStorageUuids = 5;
+  repeated StorageTypesProto targetStorageTypes = 6;
 }
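
The new field needs a wrapper message because BlockCommandProto carries one list of storage types per
target datanode, and a protobuf field cannot be declared as a repeated list of repeated values;
StorageTypesProto, added to hdfs.proto later in this commit, provides that inner list. A hedged Java
sketch of building one inner list, assuming the generated classes follow the usual protoc naming under
org.apache.hadoop.hdfs.protocol.proto:

    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto;

    public class StorageTypesProtoExample {
      public static void main(String[] args) {
        // One inner list per target datanode of a BlockCommandProto.
        StorageTypesProto perTargetTypes = StorageTypesProto.newBuilder()
            .addStorageTypes(StorageTypeProto.DISK)
            .addStorageTypes(StorageTypeProto.DISK)
            .build();
        // A BlockCommandProto builder would then call addTargetStorageTypes(perTargetTypes)
        // once per target (generated-builder name assumed from protoc conventions).
        System.out.println(perTargetTypes);
      }
    }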
 
 /**

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto Wed Jul 23 01:47:28 2014
@@ -107,17 +107,21 @@ message OpWriteBlockProto {
    */
   required ChecksumProto requestedChecksum = 9;
   optional CachingStrategyProto cachingStrategy = 10;
+  optional StorageTypeProto storageType = 11 [default = DISK];
+  repeated StorageTypeProto targetStorageTypes = 12;
 }
   
 message OpTransferBlockProto {
   required ClientOperationHeaderProto header = 1;
   repeated DatanodeInfoProto targets = 2;
+  repeated StorageTypeProto targetStorageTypes = 3;
 }
 
 message OpReplaceBlockProto {
   required BaseHeaderProto header = 1;
   required string delHint = 2;
   required DatanodeInfoProto source = 3;
+  optional StorageTypeProto storageType = 4 [default = DISK];
 }
 
 message OpCopyBlockProto {

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto Wed Jul 23 01:47:28 2014
@@ -137,6 +137,13 @@ enum StorageTypeProto {
 }
 
 /**
+ * A list of storage types. 
+ */
+message StorageTypesProto {
+  repeated StorageTypeProto storageTypes = 1;
+}
+
+/**
  * A list of storage IDs. 
  */
 message StorageUuidsProto {

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Wed Jul 23 01:47:28 2014
@@ -2039,4 +2039,17 @@
   </description>
 </property>
 
+<property>
+  <name>dfs.namenode.randomize-block-locations-per-block</name>
+  <value>false</value>
+  <description>When fetching replica locations of a block, the replicas
+   are sorted based on network distance. This configuration parameter
+   determines whether the replicas at the same network distance are randomly
+   shuffled. By default, this is false, such that repeated requests for a block's
+   replicas always result in the same order. This potentially improves page cache
+   behavior. However, for some network topologies, it is desirable to shuffle this
+   order for better load balancing.
+  </description>
+</property>
+
 </configuration>
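
A hedged sketch of opting into the shuffling behaviour described above; the property can equally be
set in hdfs-site.xml, and the value shown is an example rather than a recommendation:

    import org.apache.hadoop.conf.Configuration;

    public class RandomizeLocationsExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Default is false: stable ordering of equally-distant replicas, friendlier to the
        // page cache. Set true to shuffle that order for better load spreading.
        conf.setBoolean("dfs.namenode.randomize-block-locations-per-block", true);
        System.out.println(conf.get("dfs.namenode.randomize-block-locations-per-block"));
      }
    }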

Propchange: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1610815-1612740

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html Wed Jul 23 01:47:28 2014
@@ -24,6 +24,29 @@
     <title>Browsing HDFS</title>
   </head>
   <body>
+
+    <header class="navbar navbar-inverse bs-docs-nav" role="banner">
+    <div class="container">
+      <div class="navbar-header">
+        <div class="navbar-brand">Hadoop</div>
+      </div>
+
+      <ul class="nav navbar-nav" id="ui-tabs">
+        <li><a href="dfshealth.html#tab-overview">Overview</a></li>
+        <li><a href="dfshealth.html#tab-datanode">Datanodes</a></li>
+        <li><a href="dfshealth.html#tab-snapshot">Snapshot</a></li>
+        <li><a href="dfshealth.html#tab-startup-progress">Startup Progress</a></li>
+        <li class="dropdown">
+          <a href="#" class="dropdown-toggle" data-toggle="dropdown">Utilities <b class="caret"></b></a>
+          <ul class="dropdown-menu">
+            <li><a href="#">Browse the file system</a></li>
+            <li><a href="logs">Logs</a></li>
+          </ul>
+        </li>
+      </ul>
+    </div>
+    </header>
+
     <div class="modal" id="file-info" tabindex="-1" role="dialog" aria-hidden="true">
       <div class="modal-dialog">
 	<div class="modal-content">
@@ -73,6 +96,12 @@
       </div>
       <br />
       <div id="panel"></div>
+
+      <div class="row">
+        <hr />
+        <div class="col-xs-2"><p>Hadoop, 2014.</p></div>
+      </div>
+
     </div>
 
     <script type="text/x-dust-template" id="tmpl-explorer">
@@ -126,7 +155,5 @@
     </script><script type="text/javascript" src="/static/dfs-dust.js">
     </script><script type="text/javascript" src="explorer.js">
     </script>
-    <hr />
-    <p>Hadoop, 2014.</p>
   </body>
 </html>

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm Wed Jul 23 01:47:28 2014
@@ -416,8 +416,8 @@ HDFS High Availability Using the Quorum 
 
   After all of the necessary configuration options have been set, you must
   start the JournalNode daemons on the set of machines where they will run. This
-  can be done by running the command "<hdfs-daemon.sh journalnode>" and waiting
-  for the daemon to start on each of the relevant machines.
+  can be done by running the command "<hadoop-daemon.sh start journalnode>" and
+  waiting for the daemon to start on each of the relevant machines.
 
   Once the JournalNodes have been started, one must initially synchronize the
   two HA NameNodes' on-disk metadata.

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Wed Jul 23 01:47:28 2014
@@ -380,7 +380,7 @@ public class DFSTestUtil {
    */
   public static void waitForReplication(MiniDFSCluster cluster, ExtendedBlock b,
       int racks, int replicas, int neededReplicas)
-      throws IOException, TimeoutException, InterruptedException {
+      throws TimeoutException, InterruptedException {
     int curRacks = 0;
     int curReplicas = 0;
     int curNeededReplicas = 0;
@@ -414,7 +414,7 @@ public class DFSTestUtil {
    */
   public static void waitCorruptReplicas(FileSystem fs, FSNamesystem ns,
       Path file, ExtendedBlock b, int corruptRepls)
-      throws IOException, TimeoutException, InterruptedException {
+      throws TimeoutException, InterruptedException {
     int count = 0;
     final int ATTEMPTS = 50;
     int repls = ns.getBlockManager().numCorruptReplicas(b.getLocalBlock());
@@ -839,7 +839,8 @@ public class DFSTestUtil {
 
     // send the request
     new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
-        dfsClient.clientName, new DatanodeInfo[]{datanodes[1]});
+        dfsClient.clientName, new DatanodeInfo[]{datanodes[1]},
+        new StorageType[]{StorageType.DEFAULT});
     out.flush();
 
     return BlockOpResponseProto.parseDelimitedFrom(in);

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java Wed Jul 23 01:47:28 2014
@@ -125,17 +125,16 @@ public class TestDataTransferProtocol {
         throw eof;
       }
 
-      LOG.info("Received: " +new String(retBuf));
-      LOG.info("Expected: " + StringUtils.byteToHexString(recvBuf.toByteArray()));
+      String received = StringUtils.byteToHexString(retBuf);
+      String expected = StringUtils.byteToHexString(recvBuf.toByteArray());
+      LOG.info("Received: " + received);
+      LOG.info("Expected: " + expected);
       
       if (eofExpected) {
         throw new IOException("Did not recieve IOException when an exception " +
                               "is expected while reading from " + datanode); 
       }
-      
-      byte[] needed = recvBuf.toByteArray();
-      assertEquals(StringUtils.byteToHexString(needed),
-          StringUtils.byteToHexString(retBuf));
+      assertEquals(expected, received);
     } finally {
       IOUtils.closeSocket(sock);
     }
@@ -184,10 +183,7 @@ public class TestDataTransferProtocol {
       String description, Boolean eofExcepted) throws IOException {
     sendBuf.reset();
     recvBuf.reset();
-    sender.writeBlock(block, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
-        new DatanodeInfo[1], null, stage,
-        0, block.getNumBytes(), block.getNumBytes(), newGS,
-        DEFAULT_CHECKSUM, CachingStrategy.newDefaultStrategy());
+    writeBlock(block, stage, newGS, DEFAULT_CHECKSUM);
     if (eofExcepted) {
       sendResponse(Status.ERROR, null, null, recvOut);
       sendRecvData(description, true);
@@ -343,10 +339,7 @@ public class TestDataTransferProtocol {
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
     try {
     cluster.waitActive();
-    DFSClient dfsClient = new DFSClient(
-                 new InetSocketAddress("localhost", cluster.getNameNodePort()),
-                 conf);                
-    datanode = dfsClient.datanodeReport(DatanodeReportType.LIVE)[0];
+    datanode = cluster.getFileSystem().getDataNodeStats(DatanodeReportType.LIVE)[0];
     dnAddr = NetUtils.createSocketAddr(datanode.getXferAddr());
     FileSystem fileSys = cluster.getFileSystem();
     
@@ -381,23 +374,14 @@ public class TestDataTransferProtocol {
     DataChecksum badChecksum = Mockito.spy(DEFAULT_CHECKSUM);
     Mockito.doReturn(-1).when(badChecksum).getBytesPerChecksum();
 
-    sender.writeBlock(new ExtendedBlock(poolId, newBlockId),
-        BlockTokenSecretManager.DUMMY_TOKEN, "cl",
-        new DatanodeInfo[1], null,
-        BlockConstructionStage.PIPELINE_SETUP_CREATE,
-        0, 0L, 0L, 0L,
-        badChecksum, CachingStrategy.newDefaultStrategy());
+    writeBlock(poolId, newBlockId, badChecksum);
     recvBuf.reset();
     sendResponse(Status.ERROR, null, null, recvOut);
     sendRecvData("wrong bytesPerChecksum while writing", true);
 
     sendBuf.reset();
     recvBuf.reset();
-    sender.writeBlock(new ExtendedBlock(poolId, ++newBlockId),
-        BlockTokenSecretManager.DUMMY_TOKEN, "cl",
-        new DatanodeInfo[1], null,
-        BlockConstructionStage.PIPELINE_SETUP_CREATE, 0, 0L, 0L, 0L,
-        DEFAULT_CHECKSUM, CachingStrategy.newDefaultStrategy());
+    writeBlock(poolId, ++newBlockId, DEFAULT_CHECKSUM);
 
     PacketHeader hdr = new PacketHeader(
       4,     // size of packet
@@ -416,11 +400,7 @@ public class TestDataTransferProtocol {
     // test for writing a valid zero size block
     sendBuf.reset();
     recvBuf.reset();
-    sender.writeBlock(new ExtendedBlock(poolId, ++newBlockId),
-        BlockTokenSecretManager.DUMMY_TOKEN, "cl",
-        new DatanodeInfo[1], null,
-        BlockConstructionStage.PIPELINE_SETUP_CREATE, 0, 0L, 0L, 0L,
-        DEFAULT_CHECKSUM, CachingStrategy.newDefaultStrategy());
+    writeBlock(poolId, ++newBlockId, DEFAULT_CHECKSUM);
 
     hdr = new PacketHeader(
       8,     // size of packet
@@ -532,4 +512,18 @@ public class TestDataTransferProtocol {
     assertTrue(hdr.sanityCheck(99));
     assertFalse(hdr.sanityCheck(100));
   }
+
+  void writeBlock(String poolId, long blockId, DataChecksum checksum) throws IOException {
+    writeBlock(new ExtendedBlock(poolId, blockId),
+        BlockConstructionStage.PIPELINE_SETUP_CREATE, 0L, checksum);
+  }
+
+  void writeBlock(ExtendedBlock block, BlockConstructionStage stage,
+      long newGS, DataChecksum checksum) throws IOException {
+    sender.writeBlock(block, StorageType.DEFAULT,
+        BlockTokenSecretManager.DUMMY_TOKEN, "cl",
+        new DatanodeInfo[1], new StorageType[1], null, stage,
+        0, block.getNumBytes(), block.getNumBytes(), newGS,
+        checksum, CachingStrategy.newDefaultStrategy());
+  }
 }

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java Wed Jul 23 01:47:28 2014
@@ -30,6 +30,8 @@ import static org.apache.hadoop.hdfs.DFS
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -79,6 +81,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
@@ -97,6 +100,8 @@ public class TestFileCreation {
     ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
   }
+  private static final String RPC_DETAILED_METRICS =
+      "RpcDetailedActivityForPort";
 
   static final long seed = 0xDEADBEEFL;
   static final int blockSize = 8192;
@@ -371,7 +376,7 @@ public class TestFileCreation {
     conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = cluster.getFileSystem();
-    
+
     UserGroupInformation otherUgi = UserGroupInformation.createUserForTesting(
         "testuser", new String[]{"testgroup"});
     FileSystem fs2 = otherUgi.doAs(new PrivilegedExceptionAction<FileSystem>() {
@@ -380,12 +385,16 @@ public class TestFileCreation {
         return FileSystem.get(cluster.getConfiguration(0));
       }
     });
-    
+
+    String metricsName = RPC_DETAILED_METRICS + cluster.getNameNodePort();
+
     try {
       Path p = new Path("/testfile");
       FSDataOutputStream stm1 = fs.create(p);
       stm1.write(1);
 
+      assertCounter("CreateNumOps", 1L, getMetrics(metricsName));
+
       // Create file again without overwrite
       try {
         fs2.create(p, false);
@@ -394,7 +403,9 @@ public class TestFileCreation {
         GenericTestUtils.assertExceptionContains("already being created by",
             abce);
       }
-      
+      // NameNodeProxies' createNNProxyWithClientProtocol has 5 retries.
+      assertCounter("AlreadyBeingCreatedExceptionNumOps",
+          6L, getMetrics(metricsName));
       FSDataOutputStream stm2 = fs2.create(p, true);
       stm2.write(2);
       stm2.close();
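
For reference, the expected value in the assertCounter call above follows from the retry behaviour
noted in the comment: one initial create() RPC plus the five client-side retries gives 1 + 5 = 6
AlreadyBeingCreatedException operations in the RPC detailed metrics for that port.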

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java Wed Jul 23 01:47:28 2014
@@ -25,14 +25,16 @@ import java.net.InetSocketAddress;
 
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
+import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
+import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RpcClientUtil;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.RefreshUserMappingsProtocol;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -76,16 +78,22 @@ public class TestIsMethodSupported {
 
   @Test
   public void testNamenodeProtocol() throws IOException {
-    NamenodeProtocolTranslatorPB translator =
-        (NamenodeProtocolTranslatorPB) NameNodeProxies.createNonHAProxy(conf,
+    NamenodeProtocol np =
+        NameNodeProxies.createNonHAProxy(conf,
             nnAddress, NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
             true).getProxy();
-    boolean exists = translator.isMethodSupported("rollEditLog");
+
+    boolean exists = RpcClientUtil.isMethodSupported(np,
+        NamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
+        RPC.getProtocolVersion(NamenodeProtocolPB.class), "rollEditLog");
+
     assertTrue(exists);
-    exists = translator.isMethodSupported("bogusMethod");
+    exists = RpcClientUtil.isMethodSupported(np,
+        NamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
+        RPC.getProtocolVersion(NamenodeProtocolPB.class), "bogusMethod");
     assertFalse(exists);
   }
-  
+
   @Test
   public void testDatanodeProtocol() throws IOException {
     DatanodeProtocolClientSideTranslatorPB translator = 
@@ -107,16 +115,18 @@ public class TestIsMethodSupported {
         NetUtils.getDefaultSocketFactory(conf));
     assertTrue(translator.isMethodSupported("refreshNamenodes"));
   }
-  
+
   @Test
   public void testClientNamenodeProtocol() throws IOException {
-    ClientNamenodeProtocolTranslatorPB translator = 
-        (ClientNamenodeProtocolTranslatorPB) NameNodeProxies.createNonHAProxy(
+    ClientProtocol cp =
+        NameNodeProxies.createNonHAProxy(
             conf, nnAddress, ClientProtocol.class,
             UserGroupInformation.getCurrentUser(), true).getProxy();
-    assertTrue(translator.isMethodSupported("mkdirs"));
+    RpcClientUtil.isMethodSupported(cp,
+        ClientNamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
+        RPC.getProtocolVersion(ClientNamenodeProtocolPB.class), "mkdirs");
   }
-  
+
   @Test
   public void tesJournalProtocol() throws IOException {
     JournalProtocolTranslatorPB translator = (JournalProtocolTranslatorPB)

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java Wed Jul 23 01:47:28 2014
@@ -550,8 +550,10 @@ public class TestPBHelper {
     dnInfos[1][0] = DFSTestUtil.getLocalDatanodeInfo();
     dnInfos[1][1] = DFSTestUtil.getLocalDatanodeInfo();
     String[][] storageIDs = {{"s00"}, {"s10", "s11"}};
+    StorageType[][] storageTypes = {{StorageType.DEFAULT},
+        {StorageType.DEFAULT, StorageType.DEFAULT}};
     BlockCommand bc = new BlockCommand(DatanodeProtocol.DNA_TRANSFER, "bp1",
-        blocks, dnInfos, storageIDs);
+        blocks, dnInfos, storageTypes, storageIDs);
     BlockCommandProto bcProto = PBHelper.convert(bc);
     BlockCommand bc2 = PBHelper.convert(bcProto);
     assertEquals(bc.getAction(), bc2.getAction());

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java Wed Jul 23 01:47:28 2014
@@ -89,7 +89,8 @@ public class TestDelegationTokenForProxy
     builder.append("127.0.1.1,");
     builder.append(InetAddress.getLocalHost().getCanonicalHostName());
     LOG.info("Local Ip addresses: " + builder.toString());
-    conf.setStrings(DefaultImpersonationProvider.getProxySuperuserIpConfKey(superUserShortName),
+    conf.setStrings(DefaultImpersonationProvider.getTestProvider().
+            getProxySuperuserIpConfKey(superUserShortName),
         builder.toString());
   }
   
@@ -101,7 +102,8 @@ public class TestDelegationTokenForProxy
         DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
     config.setLong(
         DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
-    config.setStrings(DefaultImpersonationProvider.getProxySuperuserGroupConfKey(REAL_USER),
+    config.setStrings(DefaultImpersonationProvider.getTestProvider().
+            getProxySuperuserGroupConfKey(REAL_USER),
         "group1");
     config.setBoolean(
         DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java Wed Jul 23 01:47:28 2014
@@ -905,49 +905,46 @@ public class TestReplicationPolicy {
    */
   @Test
   public void testChooseReplicaToDelete() throws Exception {
-    List<DatanodeDescriptor> replicaNodeList = new 
-        ArrayList<DatanodeDescriptor>();
-    final Map<String, List<DatanodeDescriptor>> rackMap
-        = new HashMap<String, List<DatanodeDescriptor>>();
+    List<DatanodeStorageInfo> replicaList = new ArrayList<DatanodeStorageInfo>();
+    final Map<String, List<DatanodeStorageInfo>> rackMap
+        = new HashMap<String, List<DatanodeStorageInfo>>();
     
     dataNodes[0].setRemaining(4*1024*1024);
-    replicaNodeList.add(dataNodes[0]);
+    replicaList.add(storages[0]);
     
     dataNodes[1].setRemaining(3*1024*1024);
-    replicaNodeList.add(dataNodes[1]);
+    replicaList.add(storages[1]);
     
     dataNodes[2].setRemaining(2*1024*1024);
-    replicaNodeList.add(dataNodes[2]);
+    replicaList.add(storages[2]);
     
     dataNodes[5].setRemaining(1*1024*1024);
-    replicaNodeList.add(dataNodes[5]);
+    replicaList.add(storages[5]);
     
     // Refresh the last update time for all the datanodes
     for (int i = 0; i < dataNodes.length; i++) {
       dataNodes[i].setLastUpdate(Time.now());
     }
     
-    List<DatanodeDescriptor> first = new ArrayList<DatanodeDescriptor>();
-    List<DatanodeDescriptor> second = new ArrayList<DatanodeDescriptor>();
-    replicator.splitNodesWithRack(
-        replicaNodeList, rackMap, first, second);
-    // dataNodes[0] and dataNodes[1] are in first set as their rack has two 
-    // replica nodes, while datanodes[2] and dataNodes[5] are in second set.
+    List<DatanodeStorageInfo> first = new ArrayList<DatanodeStorageInfo>();
+    List<DatanodeStorageInfo> second = new ArrayList<DatanodeStorageInfo>();
+    replicator.splitNodesWithRack(replicaList, rackMap, first, second);
+    // storages[0] and storages[1] are in first set as their rack has two 
+    // replica nodes, while storages[2] and storages[5] are in second set.
     assertEquals(2, first.size());
     assertEquals(2, second.size());
-    DatanodeDescriptor chosenNode = replicator.chooseReplicaToDelete(
+    DatanodeStorageInfo chosen = replicator.chooseReplicaToDelete(
         null, null, (short)3, first, second);
-    // Within first set, dataNodes[1] with less free space
-    assertEquals(chosenNode, dataNodes[1]);
+    // Within first set, storages[1] with less free space
+    assertEquals(chosen, storages[1]);
 
-    replicator.adjustSetsWithChosenReplica(
-        rackMap, first, second, chosenNode);
+    replicator.adjustSetsWithChosenReplica(rackMap, first, second, chosen);
     assertEquals(0, first.size());
     assertEquals(3, second.size());
-    // Within second set, dataNodes[5] with less free space
-    chosenNode = replicator.chooseReplicaToDelete(
+    // Within second set, storages[5] with less free space
+    chosen = replicator.chooseReplicaToDelete(
         null, null, (short)2, first, second);
-    assertEquals(chosenNode, dataNodes[5]);
+    assertEquals(chosen, storages[5]);
   }
   
   /**

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java Wed Jul 23 01:47:28 2014
@@ -28,6 +28,7 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.test.PathUtils;
@@ -101,6 +102,7 @@ public class TestReplicationPolicyConsid
     }
   }
 
+  private final double EPSILON = 0.0001;
   /**
    * Tests that chooseTarget with considerLoad set to true correctly calculates
    * load with decommissioned nodes.
@@ -109,14 +111,6 @@ public class TestReplicationPolicyConsid
   public void testChooseTargetWithDecomNodes() throws IOException {
     namenode.getNamesystem().writeLock();
     try {
-      // Decommission DNs so BlockPlacementPolicyDefault.isGoodTarget()
-      // returns false
-      for (int i = 0; i < 3; i++) {
-        DatanodeInfo d = dnManager.getDatanodeByXferAddr(
-            dnrList.get(i).getIpAddr(),
-            dnrList.get(i).getXferPort());
-        d.setDecommissioned();
-      }
       String blockPoolId = namenode.getNamesystem().getBlockPoolId();
       dnManager.handleHeartbeat(dnrList.get(3),
           BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[3]),
@@ -133,6 +127,20 @@ public class TestReplicationPolicyConsid
           blockPoolId, dataNodes[5].getCacheCapacity(),
           dataNodes[5].getCacheRemaining(),
           4, 0, 0);
+      // sum of the xceiverCount values sent in the heartbeats above
+      final int load = 2 + 4 + 4;
+      
+      FSNamesystem fsn = namenode.getNamesystem();
+      assertEquals((double)load/6, fsn.getInServiceXceiverAverage(), EPSILON);
+      
+      // Decommission DNs so BlockPlacementPolicyDefault.isGoodTarget()
+      // returns false
+      for (int i = 0; i < 3; i++) {
+        DatanodeDescriptor d = dnManager.getDatanode(dnrList.get(i));
+        dnManager.startDecommission(d);
+        d.setDecommissioned();
+      }
+      assertEquals((double)load/3, fsn.getInServiceXceiverAverage(), EPSILON);
 
       // Call chooseTarget()
       DatanodeStorageInfo[] targets = namenode.getNamesystem().getBlockManager()

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java Wed Jul 23 01:47:28 2014
@@ -591,51 +591,50 @@ public class TestReplicationPolicyWithNo
    */
   @Test
   public void testChooseReplicaToDelete() throws Exception {
-    List<DatanodeDescriptor> replicaNodeList = 
-        new ArrayList<DatanodeDescriptor>();
-    final Map<String, List<DatanodeDescriptor>> rackMap = 
-        new HashMap<String, List<DatanodeDescriptor>>();
+    List<DatanodeStorageInfo> replicaList = new ArrayList<DatanodeStorageInfo>();
+    final Map<String, List<DatanodeStorageInfo>> rackMap
+        = new HashMap<String, List<DatanodeStorageInfo>>();
     dataNodes[0].setRemaining(4*1024*1024);
-    replicaNodeList.add(dataNodes[0]);
+    replicaList.add(storages[0]);
 
     dataNodes[1].setRemaining(3*1024*1024);
-    replicaNodeList.add(dataNodes[1]);
+    replicaList.add(storages[1]);
 
     dataNodes[2].setRemaining(2*1024*1024);
-    replicaNodeList.add(dataNodes[2]);
+    replicaList.add(storages[2]);
 
     dataNodes[5].setRemaining(1*1024*1024);
-    replicaNodeList.add(dataNodes[5]);
+    replicaList.add(storages[5]);
 
-    List<DatanodeDescriptor> first = new ArrayList<DatanodeDescriptor>();
-    List<DatanodeDescriptor> second = new ArrayList<DatanodeDescriptor>();
+    List<DatanodeStorageInfo> first = new ArrayList<DatanodeStorageInfo>();
+    List<DatanodeStorageInfo> second = new ArrayList<DatanodeStorageInfo>();
     replicator.splitNodesWithRack(
-        replicaNodeList, rackMap, first, second);
+        replicaList, rackMap, first, second);
     assertEquals(3, first.size());
     assertEquals(1, second.size());
-    DatanodeDescriptor chosenNode = replicator.chooseReplicaToDelete(
+    DatanodeStorageInfo chosen = replicator.chooseReplicaToDelete(
         null, null, (short)3, first, second);
     // Within first set {dataNodes[0], dataNodes[1], dataNodes[2]}, 
     // dataNodes[0] and dataNodes[1] are in the same nodegroup, 
     // but dataNodes[1] is chosen as less free space
-    assertEquals(chosenNode, dataNodes[1]);
+    assertEquals(chosen, storages[1]);
 
-    replicator.adjustSetsWithChosenReplica(rackMap, first, second, chosenNode);
+    replicator.adjustSetsWithChosenReplica(rackMap, first, second, chosen);
     assertEquals(2, first.size());
     assertEquals(1, second.size());
     // Within first set {dataNodes[0], dataNodes[2]}, dataNodes[2] is chosen
     // as less free space
-    chosenNode = replicator.chooseReplicaToDelete(
+    chosen = replicator.chooseReplicaToDelete(
         null, null, (short)2, first, second);
-    assertEquals(chosenNode, dataNodes[2]);
+    assertEquals(chosen, storages[2]);
 
-    replicator.adjustSetsWithChosenReplica(rackMap, first, second, chosenNode);
+    replicator.adjustSetsWithChosenReplica(rackMap, first, second, chosen);
     assertEquals(0, first.size());
     assertEquals(2, second.size());
     // Within second set, dataNodes[5] with less free space
-    chosenNode = replicator.chooseReplicaToDelete(
+    chosen = replicator.chooseReplicaToDelete(
         null, null, (short)1, first, second);
-    assertEquals(chosenNode, dataNodes[5]);
+    assertEquals(chosen, storages[5]);
   }
   
   /**

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java Wed Jul 23 01:47:28 2014
@@ -285,8 +285,10 @@ public class TestJspHelper {
     String user = "TheNurse";
     conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
     
-    conf.set(DefaultImpersonationProvider.getProxySuperuserGroupConfKey(realUser), "*");
-    conf.set(DefaultImpersonationProvider.getProxySuperuserIpConfKey(realUser), "*");
+    conf.set(DefaultImpersonationProvider.getTestProvider().
+        getProxySuperuserGroupConfKey(realUser), "*");
+    conf.set(DefaultImpersonationProvider.getTestProvider().
+        getProxySuperuserIpConfKey(realUser), "*");
     ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
     UserGroupInformation.setConfiguration(conf);
     UserGroupInformation ugi;

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java Wed Jul 23 01:47:28 2014
@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -324,7 +325,7 @@ public abstract class BlockReportTestBas
   public void blockReport_03() throws IOException {
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path filePath = new Path("/" + METHOD_NAME + ".dat");
-    ArrayList<Block> blocks = writeFile(METHOD_NAME, FILE_SIZE, filePath);
+    writeFile(METHOD_NAME, FILE_SIZE, filePath);
 
     // all blocks belong to the same file, hence same BP
     DataNode dn = cluster.getDataNodes().get(DN_N0);
@@ -363,7 +364,7 @@ public abstract class BlockReportTestBas
     // Create a bogus new block which will not be present on the namenode.
     ExtendedBlock b = new ExtendedBlock(
         poolId, rand.nextLong(), 1024L, rand.nextLong());
-    dn.getFSDataset().createRbw(b);
+    dn.getFSDataset().createRbw(StorageType.DEFAULT, b);
 
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
     StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java Wed Jul 23 01:47:28 2014
@@ -744,14 +744,14 @@ public class SimulatedFSDataset implemen
   }
 
   @Override // FsDatasetSpi
-  public synchronized ReplicaInPipelineInterface createRbw(ExtendedBlock b) 
-  throws IOException {
-    return createTemporary(b);
+  public synchronized ReplicaInPipelineInterface createRbw(
+      StorageType storageType, ExtendedBlock b) throws IOException {
+    return createTemporary(storageType, b);
   }
 
   @Override // FsDatasetSpi
-  public synchronized ReplicaInPipelineInterface createTemporary(ExtendedBlock b)
-      throws IOException {
+  public synchronized ReplicaInPipelineInterface createTemporary(
+      StorageType storageType, ExtendedBlock b) throws IOException {
     if (isValidBlock(b)) {
           throw new ReplicaAlreadyExistsException("Block " + b + 
               " is valid, and cannot be written to.");

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java Wed Jul 23 01:47:28 2014
@@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.Distribute
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -531,7 +532,7 @@ public class TestBlockRecovery {
     if(LOG.isDebugEnabled()) {
       LOG.debug("Running " + GenericTestUtils.getMethodName());
     }
-    dn.data.createRbw(block);
+    dn.data.createRbw(StorageType.DEFAULT, block);
     try {
       dn.syncBlock(rBlock, initBlockRecords(dn));
       fail("Sync should fail");
@@ -554,7 +555,8 @@ public class TestBlockRecovery {
     if(LOG.isDebugEnabled()) {
       LOG.debug("Running " + GenericTestUtils.getMethodName());
     }
-    ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(block);
+    ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(
+        StorageType.DEFAULT, block);
     ReplicaOutputStreams streams = null;
     try {
       streams = replicaInfo.createStreams(true,

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java Wed Jul 23 01:47:28 2014
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -264,7 +265,8 @@ public class TestBlockReplacement {
     sock.setKeepAlive(true);
     // sendRequest
     DataOutputStream out = new DataOutputStream(sock.getOutputStream());
-    new Sender(out).replaceBlock(block, BlockTokenSecretManager.DUMMY_TOKEN,
+    new Sender(out).replaceBlock(block, StorageType.DEFAULT,
+        BlockTokenSecretManager.DUMMY_TOKEN,
         source.getDatanodeUuid(), sourceProxy);
     out.flush();
     // receiveResponse

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java Wed Jul 23 01:47:28 2014
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -147,9 +148,9 @@ public class TestDiskError {
 
     DataChecksum checksum = DataChecksum.newDataChecksum(
         DataChecksum.Type.CRC32, 512);
-    new Sender(out).writeBlock(block.getBlock(),
+    new Sender(out).writeBlock(block.getBlock(), StorageType.DEFAULT,
         BlockTokenSecretManager.DUMMY_TOKEN, "",
-        new DatanodeInfo[0], null,
+        new DatanodeInfo[0], new StorageType[0], null,
         BlockConstructionStage.PIPELINE_SETUP_CREATE, 1, 0L, 0L, 0L,
         checksum, CachingStrategy.newDefaultStrategy());
     out.flush();

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java Wed Jul 23 01:47:28 2014
@@ -29,6 +29,7 @@ import java.io.OutputStream;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -65,7 +66,8 @@ public class TestSimulatedFSDataset {
       ExtendedBlock b = new ExtendedBlock(bpid, i, 0, 0); 
       // we pass expected len as zero, - fsdataset should use the sizeof actual
       // data written
-      ReplicaInPipelineInterface bInfo = fsdataset.createRbw(b);
+      ReplicaInPipelineInterface bInfo = fsdataset.createRbw(
+          StorageType.DEFAULT, b);
       ReplicaOutputStreams out = bInfo.createStreams(true,
           DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
       try {

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java Wed Jul 23 01:47:28 2014
@@ -21,6 +21,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
@@ -147,7 +148,7 @@ public class TestWriteToReplica {
     };
     
     ReplicaMap replicasMap = dataSet.volumeMap;
-    FsVolumeImpl vol = dataSet.volumes.getNextVolume(0);
+    FsVolumeImpl vol = dataSet.volumes.getNextVolume(StorageType.DEFAULT, 0);
     ReplicaInfo replicaInfo = new FinalizedReplica(
         blocks[FINALIZED].getLocalBlock(), vol, vol.getCurrentDir().getParentFile());
     replicasMap.add(bpid, replicaInfo);
@@ -357,7 +358,7 @@ public class TestWriteToReplica {
     }
  
     try {
-      dataSet.createRbw(blocks[FINALIZED]);
+      dataSet.createRbw(StorageType.DEFAULT, blocks[FINALIZED]);
       Assert.fail("Should not have created a replica that's already " +
       		"finalized " + blocks[FINALIZED]);
     } catch (ReplicaAlreadyExistsException e) {
@@ -375,7 +376,7 @@ public class TestWriteToReplica {
     }
 
     try {
-      dataSet.createRbw(blocks[TEMPORARY]);
+      dataSet.createRbw(StorageType.DEFAULT, blocks[TEMPORARY]);
       Assert.fail("Should not have created a replica that had created as " +
       		"temporary " + blocks[TEMPORARY]);
     } catch (ReplicaAlreadyExistsException e) {
@@ -385,7 +386,7 @@ public class TestWriteToReplica {
         0L, blocks[RBW].getNumBytes());  // expect to be successful
     
     try {
-      dataSet.createRbw(blocks[RBW]);
+      dataSet.createRbw(StorageType.DEFAULT, blocks[RBW]);
       Assert.fail("Should not have created a replica that had created as RBW " +
           blocks[RBW]);
     } catch (ReplicaAlreadyExistsException e) {
@@ -401,7 +402,7 @@ public class TestWriteToReplica {
     }
 
     try {
-      dataSet.createRbw(blocks[RWR]);
+      dataSet.createRbw(StorageType.DEFAULT, blocks[RWR]);
       Assert.fail("Should not have created a replica that was waiting to be " +
       		"recovered " + blocks[RWR]);
     } catch (ReplicaAlreadyExistsException e) {
@@ -417,7 +418,7 @@ public class TestWriteToReplica {
     }
 
     try {
-      dataSet.createRbw(blocks[RUR]);
+      dataSet.createRbw(StorageType.DEFAULT, blocks[RUR]);
       Assert.fail("Should not have created a replica that was under recovery " +
           blocks[RUR]);
     } catch (ReplicaAlreadyExistsException e) {
@@ -434,45 +435,45 @@ public class TestWriteToReplica {
           e.getMessage().contains(ReplicaNotFoundException.NON_EXISTENT_REPLICA));
     }
     
-    dataSet.createRbw(blocks[NON_EXISTENT]);
+    dataSet.createRbw(StorageType.DEFAULT, blocks[NON_EXISTENT]);
   }
   
   private void testWriteToTemporary(FsDatasetImpl dataSet, ExtendedBlock[] blocks) throws IOException {
     try {
-      dataSet.createTemporary(blocks[FINALIZED]);
+      dataSet.createTemporary(StorageType.DEFAULT, blocks[FINALIZED]);
       Assert.fail("Should not have created a temporary replica that was " +
       		"finalized " + blocks[FINALIZED]);
     } catch (ReplicaAlreadyExistsException e) {
     }
  
     try {
-      dataSet.createTemporary(blocks[TEMPORARY]);
+      dataSet.createTemporary(StorageType.DEFAULT, blocks[TEMPORARY]);
       Assert.fail("Should not have created a replica that had created as" +
       		"temporary " + blocks[TEMPORARY]);
     } catch (ReplicaAlreadyExistsException e) {
     }
     
     try {
-      dataSet.createTemporary(blocks[RBW]);
+      dataSet.createTemporary(StorageType.DEFAULT, blocks[RBW]);
       Assert.fail("Should not have created a replica that had created as RBW " +
           blocks[RBW]);
     } catch (ReplicaAlreadyExistsException e) {
     }
     
     try {
-      dataSet.createTemporary(blocks[RWR]);
+      dataSet.createTemporary(StorageType.DEFAULT, blocks[RWR]);
       Assert.fail("Should not have created a replica that was waiting to be " +
       		"recovered " + blocks[RWR]);
     } catch (ReplicaAlreadyExistsException e) {
     }
     
     try {
-      dataSet.createTemporary(blocks[RUR]);
+      dataSet.createTemporary(StorageType.DEFAULT, blocks[RUR]);
       Assert.fail("Should not have created a replica that was under recovery " +
           blocks[RUR]);
     } catch (ReplicaAlreadyExistsException e) {
     }
     
-    dataSet.createTemporary(blocks[NON_EXISTENT]);
+    dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]);
   }
 }
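
Note: each negative case in testWriteToRbw and testWriteToTemporary above repeats the same try / Assert.fail / catch pattern to check that createRbw and createTemporary reject replicas in the wrong state. A minimal sketch of a reusable helper for that idiom is shown below; the class and method names are hypothetical and not part of this patch, and it uses only JUnit 4's Assert.

    import org.junit.Assert;

    public final class ExceptionAsserts {
      /** Action that may throw; a stand-in for the createRbw/createTemporary calls above. */
      public interface ThrowingRunnable {
        void run() throws Exception;
      }

      /** Runs the action and fails the test unless it throws the expected exception type. */
      public static void assertThrows(Class<? extends Exception> expected, ThrowingRunnable action) {
        try {
          action.run();
          Assert.fail("Expected " + expected.getSimpleName() + " but nothing was thrown");
        } catch (Exception e) {
          Assert.assertTrue("Unexpected exception: " + e, expected.isInstance(e));
        }
      }
    }

With such a helper each case collapses to one call, for example (anonymous-class form, compatible with the Java 6/7 sources of this branch):

    assertThrows(ReplicaAlreadyExistsException.class, new ExceptionAsserts.ThrowingRunnable() {
      public void run() throws Exception { dataSet.createRbw(StorageType.DEFAULT, blocks[FINALIZED]); }
    });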

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java Wed Jul 23 01:47:28 2014
@@ -18,32 +18,41 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
-import java.util.ArrayList;
-import java.util.Random;
-import java.io.IOException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Random;
 
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.junit.Test;
+import org.apache.log4j.Level;
 import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.BeforeClass;
+import org.junit.Test;
 
 
 public class TestFavoredNodesEndToEnd {
+  {
+    ((Log4JLogger)LogFactory.getLog(BlockPlacementPolicy.class)).getLogger().setLevel(Level.ALL);
+  }
+
   private static MiniDFSCluster cluster;
   private static Configuration conf;
   private final static int NUM_DATA_NODES = 10;
@@ -79,7 +88,7 @@ public class TestFavoredNodesEndToEnd {
       InetSocketAddress datanode[] = getDatanodes(rand);
       Path p = new Path("/filename"+i);
       FSDataOutputStream out = dfs.create(p, FsPermission.getDefault(), true,
-          4096, (short)3, (long)4096, null, datanode);
+          4096, (short)3, 4096L, null, datanode);
       out.write(SOME_BYTES);
       out.close();
       BlockLocation[] locations = getBlockLocations(p);
@@ -98,14 +107,13 @@ public class TestFavoredNodesEndToEnd {
     //get some other nodes. In other words, the write to hdfs should not fail
     //and if we do getBlockLocations on the file, we should see one blklocation
     //and three hosts for that
-    Random rand = new Random(System.currentTimeMillis());
     InetSocketAddress arbitraryAddrs[] = new InetSocketAddress[3];
     for (int i = 0; i < 3; i++) {
       arbitraryAddrs[i] = getArbitraryLocalHostAddr();
     }
     Path p = new Path("/filename-foo-bar");
     FSDataOutputStream out = dfs.create(p, FsPermission.getDefault(), true,
-        4096, (short)3, (long)4096, null, arbitraryAddrs);
+        4096, (short)3, 4096L, null, arbitraryAddrs);
     out.write(SOME_BYTES);
     out.close();
     getBlockLocations(p);
@@ -113,35 +121,41 @@ public class TestFavoredNodesEndToEnd {
 
   @Test(timeout=180000)
   public void testWhenSomeNodesAreNotGood() throws Exception {
+    // 4 favored nodes
+    final InetSocketAddress addrs[] = new InetSocketAddress[4];
+    final String[] hosts = new String[addrs.length];
+    for (int i = 0; i < addrs.length; i++) {
+      addrs[i] = datanodes.get(i).getXferAddress();
+      hosts[i] = addrs[i].getAddress().getHostAddress() + ":" + addrs[i].getPort();
+    }
+
     //make some datanode not "good" so that even if the client prefers it,
     //the namenode would not give it as a replica to write to
     DatanodeInfo d = cluster.getNameNode().getNamesystem().getBlockManager()
            .getDatanodeManager().getDatanodeByXferAddr(
-               datanodes.get(0).getXferAddress().getAddress().getHostAddress(), 
-               datanodes.get(0).getXferAddress().getPort());
+               addrs[0].getAddress().getHostAddress(), addrs[0].getPort());
     //set the decommission status to true so that 
     //BlockPlacementPolicyDefault.isGoodTarget returns false for this dn
     d.setDecommissioned();
-    InetSocketAddress addrs[] = new InetSocketAddress[3];
-    for (int i = 0; i < 3; i++) {
-      addrs[i] = datanodes.get(i).getXferAddress();
-    }
     Path p = new Path("/filename-foo-bar-baz");
+    final short replication = (short)3;
     FSDataOutputStream out = dfs.create(p, FsPermission.getDefault(), true,
-        4096, (short)3, (long)4096, null, addrs);
+        4096, replication, 4096L, null, addrs);
     out.write(SOME_BYTES);
     out.close();
     //reset the state
     d.stopDecommission();
+
     BlockLocation[] locations = getBlockLocations(p);
+    Assert.assertEquals(replication, locations[0].getNames().length);
     //also make sure that the datanode[0] is not in the list of hosts
-    String datanode0 = 
-        datanodes.get(0).getXferAddress().getAddress().getHostAddress()
-        + ":" + datanodes.get(0).getXferAddress().getPort();
-    for (int i = 0; i < 3; i++) {
-      if (locations[0].getNames()[i].equals(datanode0)) {
-        fail(datanode0 + " not supposed to be a replica for the block");
-      }
+    for (int i = 0; i < replication; i++) {
+      final String loc = locations[0].getNames()[i];
+      int j = 0;
+      for(; j < hosts.length && !loc.equals(hosts[j]); j++);
+      Assert.assertTrue("j=" + j, j > 0);
+      Assert.assertTrue("loc=" + loc + " not in host list "
+          + Arrays.asList(hosts) + ", j=" + j, j < hosts.length);
     }
   }
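
Note: the replacement loop above walks the hosts array by index to verify that every reported replica location is one of the favored nodes while excluding index 0, the decommissioned datanode. A set-based formulation of the same check can read more directly; the sketch below is illustrative only, not part of the patch, and assumes the same "ip:port" host strings the test builds.

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;
    import org.junit.Assert;

    public final class FavoredNodeChecks {
      /** Asserts every reported location is an eligible favored node and none is the excluded one. */
      public static void assertLocationsFavored(String[] reported, String[] favoredHosts, String excludedHost) {
        Set<String> allowed = new HashSet<String>(Arrays.asList(favoredHosts));
        allowed.remove(excludedHost);  // e.g. the decommissioned datanode's "ip:port"
        for (String loc : reported) {
          Assert.assertTrue(loc + " is not an eligible favored node " + allowed, allowed.contains(loc));
        }
      }
    }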
 

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java Wed Jul 23 01:47:28 2014
@@ -18,9 +18,11 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 
-import static org.junit.Assert.assertTrue;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY;
+import static org.junit.Assert.*;
 
 import java.io.File;
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -28,12 +30,21 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DF;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.junit.Test;
 
 
@@ -153,4 +164,177 @@ public class TestNamenodeCapacityReport 
       if (cluster != null) {cluster.shutdown();}
     }
   }
+  
+  private static final float EPSILON = 0.0001f;
+  @Test
+  public void testXceiverCount() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    // don't waste time retrying if close fails
+    conf.setInt(DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY, 0);
+    MiniDFSCluster cluster = null;
+
+    final int nodes = 8;
+    final int fileCount = 5;
+    final short fileRepl = 3;
+
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(nodes).build();
+      cluster.waitActive();
+
+      final FSNamesystem namesystem = cluster.getNamesystem();
+      final DatanodeManager dnm = namesystem.getBlockManager().getDatanodeManager();
+      List<DataNode> datanodes = cluster.getDataNodes();
+      final DistributedFileSystem fs = cluster.getFileSystem();
+
+      // trigger heartbeats in case not already sent
+      triggerHeartbeats(datanodes);
+      
+      // check that all nodes are live and in service
+      int expectedTotalLoad = nodes;  // xceiver server adds 1 to load
+      int expectedInServiceNodes = nodes;
+      int expectedInServiceLoad = nodes;
+      assertEquals(nodes, namesystem.getNumLiveDataNodes());
+      assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
+      assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
+      assertEquals((double)expectedInServiceLoad/expectedInServiceNodes,
+          namesystem.getInServiceXceiverAverage(), EPSILON);
+      
+      // shutdown half the nodes and force a heartbeat check to ensure
+      // counts are accurate
+      for (int i=0; i < nodes/2; i++) {
+        DataNode dn = datanodes.get(i);
+        DatanodeDescriptor dnd = dnm.getDatanode(dn.getDatanodeId());
+        dn.shutdown();
+        dnd.setLastUpdate(0L);
+        BlockManagerTestUtil.checkHeartbeat(namesystem.getBlockManager());
+        expectedInServiceNodes--;
+        assertEquals(expectedInServiceNodes, namesystem.getNumLiveDataNodes());
+        assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
+      }
+
+      // restart the nodes to verify that counts are correct after
+      // node re-registration 
+      cluster.restartDataNodes();
+      cluster.waitActive();
+      datanodes = cluster.getDataNodes();
+      expectedInServiceNodes = nodes;
+      assertEquals(nodes, datanodes.size());
+      assertEquals(nodes, namesystem.getNumLiveDataNodes());
+      assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
+      assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
+      assertEquals((double)expectedInServiceLoad/expectedInServiceNodes,
+          namesystem.getInServiceXceiverAverage(), EPSILON);
+      
+      // create streams and hsync to force datastreamers to start
+      DFSOutputStream[] streams = new DFSOutputStream[fileCount];
+      for (int i=0; i < fileCount; i++) {
+        streams[i] = (DFSOutputStream)fs.create(new Path("/f"+i), fileRepl)
+            .getWrappedStream();
+        streams[i].write("1".getBytes());
+        streams[i].hsync();
+        // the load for writers is 2 because both the write xceiver & packet
+        // responder threads are counted in the load
+        expectedTotalLoad += 2*fileRepl;
+        expectedInServiceLoad += 2*fileRepl;
+      }
+      // force nodes to send load update
+      triggerHeartbeats(datanodes);
+      assertEquals(nodes, namesystem.getNumLiveDataNodes());
+      assertEquals(expectedInServiceNodes,
+          namesystem.getNumDatanodesInService());
+      assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
+      assertEquals((double)expectedInServiceLoad/expectedInServiceNodes,
+          namesystem.getInServiceXceiverAverage(), EPSILON);
+
+      // decomm a few nodes, subtract their load from the expected load,
+      // trigger heartbeat to force load update
+      for (int i=0; i < fileRepl; i++) {
+        expectedInServiceNodes--;
+        DatanodeDescriptor dnd =
+            dnm.getDatanode(datanodes.get(i).getDatanodeId());
+        expectedInServiceLoad -= dnd.getXceiverCount();
+        dnm.startDecommission(dnd);
+        DataNodeTestUtils.triggerHeartbeat(datanodes.get(i));
+        Thread.sleep(100);
+        assertEquals(nodes, namesystem.getNumLiveDataNodes());
+        assertEquals(expectedInServiceNodes,
+            namesystem.getNumDatanodesInService());
+        assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
+        assertEquals((double)expectedInServiceLoad/expectedInServiceNodes,
+            namesystem.getInServiceXceiverAverage(), EPSILON);
+      }
+      
+      // check expected load while closing each stream.  recalc expected
+      // load based on whether the nodes in the pipeline are decomm
+      for (int i=0; i < fileCount; i++) {
+        int decomm = 0;
+        for (DatanodeInfo dni : streams[i].getPipeline()) {
+          DatanodeDescriptor dnd = dnm.getDatanode(dni);
+          expectedTotalLoad -= 2;
+          if (dnd.isDecommissionInProgress() || dnd.isDecommissioned()) {
+            decomm++;
+          } else {
+            expectedInServiceLoad -= 2;
+          }
+        }
+        try {
+          streams[i].close();
+        } catch (IOException ioe) {
+          // nodes may finish decommissioning even if they hold an under-construction
+          // block whose other locations are also decommissioned; we ignore
+          // that bug for now
+          if (decomm < fileRepl) {
+            throw ioe;
+          }
+        }
+        triggerHeartbeats(datanodes);
+        // verify node count and loads 
+        assertEquals(nodes, namesystem.getNumLiveDataNodes());
+        assertEquals(expectedInServiceNodes,
+            namesystem.getNumDatanodesInService());
+        assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
+        assertEquals((double)expectedInServiceLoad/expectedInServiceNodes,
+            namesystem.getInServiceXceiverAverage(), EPSILON);
+      }
+
+      // shutdown each node, verify node counts based on decomm state
+      for (int i=0; i < nodes; i++) {
+        DataNode dn = datanodes.get(i);
+        dn.shutdown();
+        // force it to appear dead so live count decreases
+        DatanodeDescriptor dnDesc = dnm.getDatanode(dn.getDatanodeId());
+        dnDesc.setLastUpdate(0L);
+        BlockManagerTestUtil.checkHeartbeat(namesystem.getBlockManager());
+        assertEquals(nodes-1-i, namesystem.getNumLiveDataNodes());
+        // first few nodes are already out of service
+        if (i >= fileRepl) {
+          expectedInServiceNodes--;
+        }
+        assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
+        
+        // live nodes always report a load of 1; zero live nodes means a load of 0
+        double expectedXceiverAvg = (i == nodes-1) ? 0.0 : 1.0;
+        assertEquals((double)expectedXceiverAvg,
+            namesystem.getInServiceXceiverAverage(), EPSILON);
+      }
+      
+      // final sanity check
+      assertEquals(0, namesystem.getNumLiveDataNodes());
+      assertEquals(0, namesystem.getNumDatanodesInService());
+      assertEquals(0.0, namesystem.getTotalLoad(), EPSILON);
+      assertEquals(0.0, namesystem.getInServiceXceiverAverage(), EPSILON);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+  
+  private void triggerHeartbeats(List<DataNode> datanodes)
+      throws IOException, InterruptedException {
+    for (DataNode dn : datanodes) {
+      DataNodeTestUtils.triggerHeartbeat(dn);
+    }
+    Thread.sleep(100);
+  }
 }
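
Note: the expected-load bookkeeping in the new testXceiverCount follows directly from the comments in the test: each live datanode's xceiver server contributes 1 to the load, and every replica of an open write pipeline contributes 2 (write xceiver plus packet responder). A standalone arithmetic sketch of those expectations, using the constants from the test, is below; it is for illustration only.

    public class XceiverLoadMath {
      public static void main(String[] args) {
        int nodes = 8;       // datanodes in the mini cluster
        int fileCount = 5;   // open output streams
        int fileRepl = 3;    // replication of each stream

        int idleLoad = nodes;                          // 1 per live node for the xceiver server
        int writeLoad = 2 * fileRepl * fileCount;      // 2 per replica per open stream
        int expectedTotalLoad = idleLoad + writeLoad;  // 8 + 30 = 38

        double expectedAverage = (double) expectedTotalLoad / nodes;  // 4.75 while all nodes are in service
        System.out.println("total=" + expectedTotalLoad + ", average=" + expectedAverage);
      }
    }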

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java Wed Jul 23 01:47:28 2014
@@ -21,6 +21,8 @@ import static org.junit.Assert.assertEqu
 import static org.junit.Assert.assertTrue;
 
 import java.net.URI;
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.Collections;
 
 import org.apache.hadoop.conf.Configuration;
@@ -30,11 +32,15 @@ import org.apache.hadoop.hdfs.server.com
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
 
 /**
  * This class tests various upgrade cases from earlier versions to current
  * version with and without clusterid.
  */
+@RunWith(value = Parameterized.class)
 public class TestStartupOptionUpgrade {
 
   private Configuration conf;
@@ -42,10 +48,21 @@ public class TestStartupOptionUpgrade {
   private int layoutVersion;
   NNStorage storage;
 
+  @Parameters
+  public static Collection<Object[]> startOption() {
+    Object[][] params = new Object[][] { { StartupOption.UPGRADE },
+        { StartupOption.UPGRADEONLY } };
+    return Arrays.asList(params);
+  }
+
+  public TestStartupOptionUpgrade(StartupOption startOption) {
+    super();
+    this.startOpt = startOption;
+  }
+  
   @Before
   public void setUp() throws Exception {
     conf = new HdfsConfiguration();
-    startOpt = StartupOption.UPGRADE;
     startOpt.setClusterId(null);
     storage = new NNStorage(conf,
       Collections.<URI>emptyList(),
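
Note: the change above converts TestStartupOptionUpgrade into a JUnit 4 parameterized test, so every case runs once with StartupOption.UPGRADE and once with StartupOption.UPGRADEONLY. For reference, a minimal self-contained example of the same Parameterized pattern (hypothetical class and values, not part of the patch):

    import static org.junit.Assert.assertTrue;

    import java.util.Arrays;
    import java.util.Collection;

    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized;
    import org.junit.runners.Parameterized.Parameters;

    @RunWith(Parameterized.class)
    public class SquareIsNonNegativeTest {
      @Parameters
      public static Collection<Object[]> values() {
        return Arrays.asList(new Object[][] { { -3 }, { 0 }, { 7 } });
      }

      private final int value;

      public SquareIsNonNegativeTest(int value) {
        this.value = value;  // injected once per parameter row
      }

      @Test
      public void squareIsNonNegative() {
        assertTrue(value * value >= 0);
      }
    }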

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java Wed Jul 23 01:47:28 2014
@@ -46,7 +46,7 @@ import org.apache.hadoop.hdfs.server.blo
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -585,15 +585,14 @@ public class TestDNFencing {
     }
 
     @Override
-    public DatanodeDescriptor chooseReplicaToDelete(BlockCollection inode,
+    public DatanodeStorageInfo chooseReplicaToDelete(BlockCollection inode,
         Block block, short replicationFactor,
-        Collection<DatanodeDescriptor> first,
-        Collection<DatanodeDescriptor> second) {
+        Collection<DatanodeStorageInfo> first,
+        Collection<DatanodeStorageInfo> second) {
       
-      Collection<DatanodeDescriptor> chooseFrom =
-        !first.isEmpty() ? first : second;
+      Collection<DatanodeStorageInfo> chooseFrom = !first.isEmpty() ? first : second;
 
-      List<DatanodeDescriptor> l = Lists.newArrayList(chooseFrom);
+      List<DatanodeStorageInfo> l = Lists.newArrayList(chooseFrom);
       return l.get(DFSUtil.getRandom().nextInt(l.size()));
     }
   }
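
Note: the RandomDeleterPolicy override above now operates on DatanodeStorageInfo instead of DatanodeDescriptor, but its logic is unchanged: pick a random element from the first collection if it is non-empty, otherwise from the second. That logic in isolation, as a generic self-contained sketch (names are illustrative, not part of the patch):

    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.List;
    import java.util.Random;

    public final class RandomChoice {
      /** Returns a random element of {@code first} if non-empty, otherwise of {@code second}. */
      public static <T> T pickRandom(Collection<T> first, Collection<T> second, Random random) {
        Collection<T> chooseFrom = !first.isEmpty() ? first : second;
        List<T> candidates = new ArrayList<T>(chooseFrom);
        return candidates.get(random.nextInt(candidates.size()));
      }
    }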

Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java Wed Jul 23 01:47:28 2014
@@ -50,6 +50,7 @@ import org.apache.hadoop.hdfs.Distribute
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
@@ -299,7 +300,8 @@ public class TestDelegationTokensWithHA 
     UserGroupInformation ugi = UserGroupInformation.createRemoteUser("test");
     
     URI haUri = new URI("hdfs://my-ha-uri/");
-    token.setService(HAUtil.buildTokenServiceForLogicalUri(haUri));
+    token.setService(HAUtil.buildTokenServiceForLogicalUri(haUri,
+        HdfsConstants.HDFS_URI_SCHEME));
     ugi.addToken(token);
 
     Collection<InetSocketAddress> nnAddrs = new HashSet<InetSocketAddress>();
@@ -355,7 +357,8 @@ public class TestDelegationTokensWithHA 
   @Test
   public void testDFSGetCanonicalServiceName() throws Exception {
     URI hAUri = HATestUtil.getLogicalUri(cluster);
-    String haService = HAUtil.buildTokenServiceForLogicalUri(hAUri).toString();
+    String haService = HAUtil.buildTokenServiceForLogicalUri(hAUri,
+        HdfsConstants.HDFS_URI_SCHEME).toString();
     assertEquals(haService, dfs.getCanonicalServiceName());
     final String renewer = UserGroupInformation.getCurrentUser().getShortUserName();
     final Token<DelegationTokenIdentifier> token =
@@ -371,7 +374,8 @@ public class TestDelegationTokensWithHA 
     Configuration conf = dfs.getConf();
     URI haUri = HATestUtil.getLogicalUri(cluster);
     AbstractFileSystem afs =  AbstractFileSystem.createFileSystem(haUri, conf);    
-    String haService = HAUtil.buildTokenServiceForLogicalUri(haUri).toString();
+    String haService = HAUtil.buildTokenServiceForLogicalUri(haUri,
+        HdfsConstants.HDFS_URI_SCHEME).toString();
     assertEquals(haService, afs.getCanonicalServiceName());
     Token<?> token = afs.getDelegationTokens(
         UserGroupInformation.getCurrentUser().getShortUserName()).get(0);