Posted to common-commits@hadoop.apache.org by ji...@apache.org on 2015/03/09 21:25:27 UTC

[01/50] [abbrv] hadoop git commit: MAPREDUCE-6268. Fix typo in Task Attempt API's URL. Contributed by Ryu Kobayashi.

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 720901acf -> edc476bfc


MAPREDUCE-6268. Fix typo in Task Attempt API's URL. Contributed by Ryu Kobayashi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e208eeed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e208eeed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e208eeed

Branch: refs/heads/HDFS-7285
Commit: e208eeed517d22a6d05fd3c5b61078eb175511f1
Parents: c110aab
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Tue Mar 3 16:21:16 2015 +0900
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:22 2015 -0700

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt                              | 3 +++
 .../src/site/markdown/HistoryServerRest.md                        | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e208eeed/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index ccd24a6..5fd7d30 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -399,6 +399,9 @@ Release 2.7.0 - UNRELEASED
     MAPREDUCE-6223. TestJobConf#testNegativeValueForTaskVmem failures. 
     (Varun Saxena via kasha)
 
+    MAPREDUCE-6268. Fix typo in Task Attempt API's URL. (Ryu Kobayashi
+    via ozawa)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e208eeed/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/markdown/HistoryServerRest.md
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/markdown/HistoryServerRest.md b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/markdown/HistoryServerRest.md
index 8a78754..b4ce00a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/markdown/HistoryServerRest.md
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/markdown/HistoryServerRest.md
@@ -1889,7 +1889,7 @@ A Task Attempt resource contains information about a particular task attempt wit
 
 Use the following URI to obtain a Task Attempt Object from a task identified by the attemptid value.
 
-      * http://<history server http address:port>/ws/v1/history/mapreduce/jobs/{jobid}/tasks/{taskid}/attempt/{attemptid}
+      * http://<history server http address:port>/ws/v1/history/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}
 
 #### HTTP Operations Supported
 


[50/50] [abbrv] hadoop git commit: HDFS-7837. Erasure Coding: allocate and persist striped blocks in NameNode. Contributed by Jing Zhao.

Posted by ji...@apache.org.
HDFS-7837. Erasure Coding: allocate and persist striped blocks in NameNode. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/edc476bf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/edc476bf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/edc476bf

Branch: refs/heads/HDFS-7285
Commit: edc476bfce31e8e9b20ad9b23a8312e186dc8cab
Parents: 35a08d8
Author: Jing Zhao <ji...@apache.org>
Authored: Mon Mar 2 13:44:33 2015 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:21:19 2015 -0700

----------------------------------------------------------------------
 .../hdfs/server/blockmanagement/DecommissionManager.java    | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/edc476bf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index dc17abe..3765dd0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -545,7 +545,7 @@ public class DecommissionManager {
       int underReplicatedInOpenFiles = 0;
       while (it.hasNext()) {
         numBlocksChecked++;
-        final BlockInfoContiguous block = (BlockInfoContiguous) it.next();
+        final BlockInfo block = it.next();
         // Remove the block from the list if it's no longer in the block map,
         // e.g. the containing file has been deleted
         if (blockManager.blocksMap.getStoredBlock(block) == null) {
@@ -579,8 +579,9 @@ public class DecommissionManager {
         }
 
         // Even if the block is under-replicated, 
-        // it doesn't block decommission if it's sufficiently replicated 
-        if (isSufficientlyReplicated(block, bc, num)) {
+        // it doesn't block decommission if it's sufficiently replicated
+        BlockInfoContiguous blk = (BlockInfoContiguous) block;
+        if (isSufficientlyReplicated(blk, bc, num)) {
           if (pruneSufficientlyReplicated) {
             it.remove();
           }
@@ -589,7 +590,7 @@ public class DecommissionManager {
 
         // We've found an insufficiently replicated block.
         if (insufficientlyReplicated != null) {
-          insufficientlyReplicated.add(block);
+          insufficientlyReplicated.add(blk);
         }
         // Log if this is our first time through
         if (firstReplicationLog) {


[37/50] [abbrv] hadoop git commit: HDFS-7885. Datanode should not trust the generation stamp provided by client. Contributed by Tsz Wo Nicholas Sze.

Posted by ji...@apache.org.
HDFS-7885. Datanode should not trust the generation stamp provided by client. Contributed by Tsz Wo Nicholas Sze.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/055267d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/055267d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/055267d5

Branch: refs/heads/HDFS-7285
Commit: 055267d50cffb51f28f271f27016df23fae2d222
Parents: 7070347
Author: Jing Zhao <ji...@apache.org>
Authored: Fri Mar 6 10:55:56 2015 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:26 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 15 +++++
 .../hadoop/hdfs/TestBlockReaderLocalLegacy.java | 63 ++++++++++++++++++++
 3 files changed, 81 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/055267d5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 763d327..e622a57 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1104,6 +1104,9 @@ Release 2.7.0 - UNRELEASED
 
     HDFS-7434. DatanodeID hashCode should not be mutable. (daryn via kihwal)
 
+    HDFS-7885. Datanode should not trust the generation stamp provided by
+    client. (Tsz Wo Nicholas Sze via jing9)
+
     BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
       HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/055267d5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index cc6220a..58f5615 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -2568,6 +2568,21 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   @Override // FsDatasetSpi
   public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block)
       throws IOException {
+    synchronized(this) {
+      final Replica replica = volumeMap.get(block.getBlockPoolId(),
+          block.getBlockId());
+      if (replica == null) {
+        throw new ReplicaNotFoundException(block);
+      }
+      if (replica.getGenerationStamp() < block.getGenerationStamp()) {
+        throw new IOException(
+            "Replica generation stamp < block generation stamp, block="
+            + block + ", replica=" + replica);
+      } else if (replica.getGenerationStamp() > block.getGenerationStamp()) {
+        block.setGenerationStamp(replica.getGenerationStamp());
+      }
+    }
+
     File datafile = getBlockFile(block);
     File metafile = FsDatasetUtil.getMetaFile(datafile, block.getGenerationStamp());
     BlockLocalPathInfo info = new BlockLocalPathInfo(block,
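
The new synchronized block enforces a simple rule: a client-supplied generation stamp newer than the replica's is rejected, while a stale one is silently upgraded to the replica's authoritative value (the append case exercised by the test below). A self-contained sketch of that rule with hypothetical types, not the Hadoop classes:

    import java.io.IOException;

    class GenStampSketch {
      static class Block {
        private long gs;
        Block(long gs) { this.gs = gs; }
        long getGenerationStamp() { return gs; }
        void setGenerationStamp(long gs) { this.gs = gs; }
      }

      static void validate(Block replica, Block clientBlock) throws IOException {
        if (replica == null) {
          throw new IOException("replica not found");
        }
        if (replica.getGenerationStamp() < clientBlock.getGenerationStamp()) {
          // client claims a stamp the datanode has never seen: refuse it
          throw new IOException("replica generation stamp < block generation stamp");
        } else if (replica.getGenerationStamp() > clientBlock.getGenerationStamp()) {
          // replica advanced (e.g. by append) since the client's lookup:
          // overwrite the client's stale stamp with the datanode's own
          clientBlock.setGenerationStamp(replica.getGenerationStamp());
        }
      }
    }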

http://git-wip-us.apache.org/repos/asf/hadoop/blob/055267d5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
index cb50539..1c4134f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
@@ -30,11 +30,16 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
+import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
 import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.BeforeClass;
@@ -153,4 +158,62 @@ public class TestBlockReaderLocalLegacy {
     Arrays.equals(orig, buf);
     cluster.shutdown();
   }
+
+  @Test(timeout=20000)
+  public void testBlockReaderLocalLegacyWithAppend() throws Exception {
+    final short REPL_FACTOR = 1;
+    final HdfsConfiguration conf = getConfiguration(null);
+    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
+
+    final MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    cluster.waitActive();
+
+    final DistributedFileSystem dfs = cluster.getFileSystem();
+    final Path path = new Path("/testBlockReaderLocalLegacy");
+    DFSTestUtil.createFile(dfs, path, 10, REPL_FACTOR, 0);
+    DFSTestUtil.waitReplication(dfs, path, REPL_FACTOR);
+
+    final ClientDatanodeProtocol proxy;
+    final Token<BlockTokenIdentifier> token;
+    final ExtendedBlock originalBlock;
+    final long originalGS;
+    {
+      final LocatedBlock lb = cluster.getNameNode().getRpcServer()
+          .getBlockLocations(path.toString(), 0, 1).get(0);
+      proxy = DFSUtil.createClientDatanodeProtocolProxy(
+          lb.getLocations()[0], conf, 60000, false);
+      token = lb.getBlockToken();
+
+      // get block and generation stamp
+      final ExtendedBlock blk = new ExtendedBlock(lb.getBlock());
+      originalBlock = new ExtendedBlock(blk);
+      originalGS = originalBlock.getGenerationStamp();
+
+      // test getBlockLocalPathInfo
+      final BlockLocalPathInfo info = proxy.getBlockLocalPathInfo(blk, token);
+      Assert.assertEquals(originalGS, info.getBlock().getGenerationStamp());
+    }
+
+    { // append one byte
+      FSDataOutputStream out = dfs.append(path);
+      out.write(1);
+      out.close();
+    }
+
+    {
+      // get new generation stamp
+      final LocatedBlock lb = cluster.getNameNode().getRpcServer()
+          .getBlockLocations(path.toString(), 0, 1).get(0);
+      final long newGS = lb.getBlock().getGenerationStamp();
+      Assert.assertTrue(newGS > originalGS);
+
+      // getBlockLocalPathInfo using the original block.
+      Assert.assertEquals(originalGS, originalBlock.getGenerationStamp());
+      final BlockLocalPathInfo info = proxy.getBlockLocalPathInfo(
+          originalBlock, token);
+      Assert.assertEquals(newGS, info.getBlock().getGenerationStamp());
+    }
+    cluster.shutdown();
+  }
 }


[16/50] [abbrv] hadoop git commit: YARN-3231. FairScheduler: Changing queueMaxRunningApps interferes with pending jobs. (Siqi Li via kasha)

Posted by ji...@apache.org.
YARN-3231. FairScheduler: Changing queueMaxRunningApps interferes with pending jobs. (Siqi Li via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d138804e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d138804e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d138804e

Branch: refs/heads/HDFS-7285
Commit: d138804e49735995653a37efa19589f9cdf13879
Parents: 521a196
Author: Karthik Kambatla <ka...@apache.org>
Authored: Wed Mar 4 18:06:36 2015 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:24 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |   3 +
 .../scheduler/fair/FairScheduler.java           |   1 +
 .../scheduler/fair/MaxRunningAppsEnforcer.java  |  40 ++-
 .../scheduler/fair/TestFairScheduler.java       | 310 ++++++++++++++++++-
 4 files changed, 348 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d138804e/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0b71bee..9a52325 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -697,6 +697,9 @@ Release 2.7.0 - UNRELEASED
 
     YARN-3131. YarnClientImpl should check FAILED and KILLED state in
     submitApplication (Chang Li via jlowe)
+    
+    YARN-3231. FairScheduler: Changing queueMaxRunningApps interferes with pending 
+    jobs. (Siqi Li via kasha)
 
 Release 2.6.0 - 2014-11-18
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d138804e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 2b59716..e8a9555 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -1477,6 +1477,7 @@ public class FairScheduler extends
         allocConf = queueInfo;
         allocConf.getDefaultSchedulingPolicy().initialize(clusterResource);
         queueMgr.updateAllocationConfiguration(allocConf);
+        maxRunningEnforcer.updateRunnabilityOnReload();
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d138804e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
index 2c90edd..f750438 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/MaxRunningAppsEnforcer.java
@@ -105,6 +105,26 @@ public class MaxRunningAppsEnforcer {
   }
 
   /**
+   * This is called after reloading the allocation configuration when the
+   * scheduler is reinitialized.
+   *
+   * Checks to see whether any non-runnable applications become runnable
+   * now that the max running apps of the given queue has been changed.
+   *
+   * Runs in O(n) where n is the number of apps that are non-runnable and in
+   * the queues that went from having no slack to having slack.
+   */
+  public void updateRunnabilityOnReload() {
+    FSParentQueue rootQueue = scheduler.getQueueManager().getRootQueue();
+    List<List<FSAppAttempt>> appsNowMaybeRunnable =
+        new ArrayList<List<FSAppAttempt>>();
+
+    gatherPossiblyRunnableAppLists(rootQueue, appsNowMaybeRunnable);
+
+    updateAppsRunnability(appsNowMaybeRunnable, Integer.MAX_VALUE);
+  }
+
+  /**
    * Checks to see whether any other applications are runnable now that the
    * given application has been removed from the given queue, and makes them so.
    * 
@@ -156,6 +176,19 @@ public class MaxRunningAppsEnforcer {
       }
     }
 
+    updateAppsRunnability(appsNowMaybeRunnable,
+        appsNowMaybeRunnable.size());
+  }
+
+  /**
+   * Checks to see whether applications are runnable now by iterating
+   * through each one of them and checking whether the queue and user have slack.
+   *
+   * If we know how many apps can be runnable, there is no need to iterate
+   * through all apps; maxRunnableApps is used to break out of the iteration.
+   */
+  private void updateAppsRunnability(List<List<FSAppAttempt>>
+      appsNowMaybeRunnable, int maxRunnableApps) {
     // Scan through and check whether this means that any apps are now runnable
     Iterator<FSAppAttempt> iter = new MultiListStartTimeIterator(
         appsNowMaybeRunnable);
@@ -173,9 +206,7 @@ public class MaxRunningAppsEnforcer {
         next.getQueue().addApp(appSched, true);
         noLongerPendingApps.add(appSched);
 
-        // No more than one app per list will be able to be made runnable, so
-        // we can stop looking after we've found that many
-        if (noLongerPendingApps.size() >= appsNowMaybeRunnable.size()) {
+        if (noLongerPendingApps.size() >= maxRunnableApps) {
           break;
         }
       }
@@ -194,11 +225,10 @@ public class MaxRunningAppsEnforcer {
       
       if (!usersNonRunnableApps.remove(appSched.getUser(), appSched)) {
         LOG.error("Waiting app " + appSched + " expected to be in "
-        		+ "usersNonRunnableApps, but was not. This should never happen.");
+            + "usersNonRunnableApps, but was not. This should never happen.");
       }
     }
   }
-  
   /**
    * Updates the relevant tracking variables after a runnable app with the given
    * queue and user has been removed.
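
The refactoring above shares one scan between two callers with different bounds: after a configuration reload any queue may have gained slack, so updateRunnabilityOnReload passes Integer.MAX_VALUE, whereas after a single app is removed at most one app per candidate list can become runnable, so the list count suffices. A simplified sketch of the capped scan (stand-in types, not the scheduler's):

    import java.util.ArrayList;
    import java.util.List;

    class CappedRunnabilityScan {
      // Promote waiting apps until the caller-supplied bound is hit.
      static List<String> makeRunnable(List<String> waiting, int maxRunnableApps) {
        List<String> promoted = new ArrayList<>();
        for (String app : waiting) {
          if (hasSlack(app)) {               // queue and user both under their caps
            promoted.add(app);
            if (promoted.size() >= maxRunnableApps) {
              break;                         // bound reached, stop scanning
            }
          }
        }
        return promoted;
      }

      static boolean hasSlack(String app) { return true; }  // stand-in check

      public static void main(String[] args) {
        List<String> waiting = List.of("app2", "app3", "app4");
        makeRunnable(waiting, Integer.MAX_VALUE);  // after reload: no useful bound
        makeRunnable(waiting, waiting.size());     // after one app exits
      }
    }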

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d138804e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index c29dbfc..9fadba9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -2288,7 +2288,315 @@ public class TestFairScheduler extends FairSchedulerTestBase {
     // Request should be fulfilled
     assertEquals(2, scheduler.getSchedulerApp(attId1).getLiveContainers().size());
   }
-  
+
+  @Test (timeout = 5000)
+  public void testIncreaseQueueMaxRunningAppsOnTheFly() throws Exception {
+    String allocBefore = "<?xml version=\"1.0\"?>" +
+        "<allocations>" +
+        "<queue name=\"root\">" +
+        "<queue name=\"queue1\">" +
+        "<maxRunningApps>1</maxRunningApps>" +
+        "</queue>" +
+        "</queue>" +
+        "</allocations>";
+
+    String allocAfter = "<?xml version=\"1.0\"?>" +
+        "<allocations>" +
+        "<queue name=\"root\">" +
+        "<queue name=\"queue1\">" +
+        "<maxRunningApps>3</maxRunningApps>" +
+        "</queue>" +
+        "</queue>" +
+        "</allocations>";
+
+    testIncreaseQueueSettingOnTheFlyInternal(allocBefore, allocAfter);
+  }
+
+  @Test (timeout = 5000)
+  public void testIncreaseUserMaxRunningAppsOnTheFly() throws Exception {
+    String allocBefore = "<?xml version=\"1.0\"?>"+
+        "<allocations>"+
+        "<queue name=\"root\">"+
+        "<queue name=\"queue1\">"+
+        "<maxRunningApps>10</maxRunningApps>"+
+        "</queue>"+
+        "</queue>"+
+        "<user name=\"user1\">"+
+        "<maxRunningApps>1</maxRunningApps>"+
+        "</user>"+
+        "</allocations>";
+
+    String allocAfter = "<?xml version=\"1.0\"?>"+
+        "<allocations>"+
+        "<queue name=\"root\">"+
+        "<queue name=\"queue1\">"+
+        "<maxRunningApps>10</maxRunningApps>"+
+        "</queue>"+
+        "</queue>"+
+        "<user name=\"user1\">"+
+        "<maxRunningApps>3</maxRunningApps>"+
+        "</user>"+
+        "</allocations>";
+
+    testIncreaseQueueSettingOnTheFlyInternal(allocBefore, allocAfter);
+  }
+
+  private void testIncreaseQueueSettingOnTheFlyInternal(String allocBefore,
+      String allocAfter) throws Exception {
+    // Set max running apps
+    conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
+
+    PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
+    out.println(allocBefore);
+    out.close();
+
+    scheduler.init(conf);
+    scheduler.start();
+    scheduler.reinitialize(conf, resourceManager.getRMContext());
+
+    // Add a node
+    RMNode node1 =
+        MockNodes
+            .newNodeInfo(1, Resources.createResource(8192, 8), 1, "127.0.0.1");
+    NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
+    scheduler.handle(nodeEvent1);
+
+    // Request for app 1
+    ApplicationAttemptId attId1 = createSchedulingRequest(1024, "queue1",
+        "user1", 1);
+
+    scheduler.update();
+    NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node1);
+    scheduler.handle(updateEvent);
+
+    // App 1 should be running
+    assertEquals(1, scheduler.getSchedulerApp(attId1).getLiveContainers().size());
+
+    ApplicationAttemptId attId2 = createSchedulingRequest(1024, "queue1",
+        "user1", 1);
+
+    scheduler.update();
+    scheduler.handle(updateEvent);
+
+    ApplicationAttemptId attId3 = createSchedulingRequest(1024, "queue1",
+        "user1", 1);
+
+    scheduler.update();
+    scheduler.handle(updateEvent);
+
+    ApplicationAttemptId attId4 = createSchedulingRequest(1024, "queue1",
+        "user1", 1);
+
+    scheduler.update();
+    scheduler.handle(updateEvent);
+
+    // App 2 should not be running
+    assertEquals(0, scheduler.getSchedulerApp(attId2).getLiveContainers().size());
+    // App 3 should not be running
+    assertEquals(0, scheduler.getSchedulerApp(attId3).getLiveContainers().size());
+    // App 4 should not be running
+    assertEquals(0, scheduler.getSchedulerApp(attId4).getLiveContainers().size());
+
+    out = new PrintWriter(new FileWriter(ALLOC_FILE));
+    out.println(allocAfter);
+    out.close();
+    scheduler.reinitialize(conf, resourceManager.getRMContext());
+
+    scheduler.update();
+    scheduler.handle(updateEvent);
+
+    // App 2 should be running
+    assertEquals(1, scheduler.getSchedulerApp(attId2).getLiveContainers().size());
+
+    scheduler.update();
+    scheduler.handle(updateEvent);
+
+    // App 3 should be running
+    assertEquals(1, scheduler.getSchedulerApp(attId3).getLiveContainers().size());
+
+    scheduler.update();
+    scheduler.handle(updateEvent);
+
+    // App 4 should not be running
+    assertEquals(0, scheduler.getSchedulerApp(attId4).getLiveContainers().size());
+
+    // Now remove app 1
+    AppAttemptRemovedSchedulerEvent appRemovedEvent1 = new AppAttemptRemovedSchedulerEvent(
+        attId1, RMAppAttemptState.FINISHED, false);
+
+    scheduler.handle(appRemovedEvent1);
+    scheduler.update();
+    scheduler.handle(updateEvent);
+
+    // App 4 should be running
+    assertEquals(1, scheduler.getSchedulerApp(attId4).getLiveContainers().size());
+  }
+
+  @Test (timeout = 5000)
+  public void testDecreaseQueueMaxRunningAppsOnTheFly() throws Exception {
+    String allocBefore = "<?xml version=\"1.0\"?>" +
+        "<allocations>" +
+        "<queue name=\"root\">" +
+        "<queue name=\"queue1\">" +
+        "<maxRunningApps>3</maxRunningApps>" +
+        "</queue>" +
+        "</queue>" +
+        "</allocations>";
+
+    String allocAfter = "<?xml version=\"1.0\"?>" +
+        "<allocations>" +
+        "<queue name=\"root\">" +
+        "<queue name=\"queue1\">" +
+        "<maxRunningApps>1</maxRunningApps>" +
+        "</queue>" +
+        "</queue>" +
+        "</allocations>";
+
+    testDecreaseQueueSettingOnTheFlyInternal(allocBefore, allocAfter);
+  }
+
+  @Test (timeout = 5000)
+  public void testDecreaseUserMaxRunningAppsOnTheFly() throws Exception {
+    String allocBefore = "<?xml version=\"1.0\"?>"+
+        "<allocations>"+
+        "<queue name=\"root\">"+
+        "<queue name=\"queue1\">"+
+        "<maxRunningApps>10</maxRunningApps>"+
+        "</queue>"+
+        "</queue>"+
+        "<user name=\"user1\">"+
+        "<maxRunningApps>3</maxRunningApps>"+
+        "</user>"+
+        "</allocations>";
+
+    String allocAfter = "<?xml version=\"1.0\"?>"+
+        "<allocations>"+
+        "<queue name=\"root\">"+
+        "<queue name=\"queue1\">"+
+        "<maxRunningApps>10</maxRunningApps>"+
+        "</queue>"+
+        "</queue>"+
+        "<user name=\"user1\">"+
+        "<maxRunningApps>1</maxRunningApps>"+
+        "</user>"+
+        "</allocations>";
+
+    testDecreaseQueueSettingOnTheFlyInternal(allocBefore, allocAfter);
+  }
+
+  private void testDecreaseQueueSettingOnTheFlyInternal(String allocBefore,
+      String allocAfter) throws Exception {
+    // Set max running apps
+    conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
+
+    PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
+    out.println(allocBefore);
+    out.close();
+
+    scheduler.init(conf);
+    scheduler.start();
+    scheduler.reinitialize(conf, resourceManager.getRMContext());
+
+    // Add a node
+    RMNode node1 =
+        MockNodes
+            .newNodeInfo(1, Resources.createResource(8192, 8), 1, "127.0.0.1");
+    NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
+    scheduler.handle(nodeEvent1);
+
+    // Request for app 1
+    ApplicationAttemptId attId1 = createSchedulingRequest(1024, "queue1",
+        "user1", 1);
+
+    scheduler.update();
+    NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node1);
+    scheduler.handle(updateEvent);
+
+    // App 1 should be running
+    assertEquals(1, scheduler.getSchedulerApp(attId1).getLiveContainers().size());
+
+    ApplicationAttemptId attId2 = createSchedulingRequest(1024, "queue1",
+        "user1", 1);
+
+    scheduler.update();
+    scheduler.handle(updateEvent);
+
+    ApplicationAttemptId attId3 = createSchedulingRequest(1024, "queue1",
+        "user1", 1);
+
+    scheduler.update();
+    scheduler.handle(updateEvent);
+
+    ApplicationAttemptId attId4 = createSchedulingRequest(1024, "queue1",
+        "user1", 1);
+
+    scheduler.update();
+    scheduler.handle(updateEvent);
+
+    // App 2 should be running
+    assertEquals(1, scheduler.getSchedulerApp(attId2).getLiveContainers().size());
+    // App 3 should be running
+    assertEquals(1, scheduler.getSchedulerApp(attId3).getLiveContainers().size());
+    // App 4 should not be running
+    assertEquals(0, scheduler.getSchedulerApp(attId4).getLiveContainers().size());
+
+    out = new PrintWriter(new FileWriter(ALLOC_FILE));
+    out.println(allocAfter);
+    out.close();
+    scheduler.reinitialize(conf, resourceManager.getRMContext());
+
+    scheduler.update();
+    scheduler.handle(updateEvent);
+
+    // App 2 should still be running
+    assertEquals(1, scheduler.getSchedulerApp(attId2).getLiveContainers().size());
+
+    scheduler.update();
+    scheduler.handle(updateEvent);
+
+    // App 3 should still be running
+    assertEquals(1, scheduler.getSchedulerApp(attId3).getLiveContainers().size());
+
+    scheduler.update();
+    scheduler.handle(updateEvent);
+
+    // App 4 should not be running
+    assertEquals(0, scheduler.getSchedulerApp(attId4).getLiveContainers().size());
+
+    // Now remove app 1
+    AppAttemptRemovedSchedulerEvent appRemovedEvent1 = new AppAttemptRemovedSchedulerEvent(
+        attId1, RMAppAttemptState.FINISHED, false);
+
+    scheduler.handle(appRemovedEvent1);
+    scheduler.update();
+    scheduler.handle(updateEvent);
+
+    // App 4 should not be running
+    assertEquals(0, scheduler.getSchedulerApp(attId4).getLiveContainers().size());
+
+    // Now remove app 2
+    appRemovedEvent1 = new AppAttemptRemovedSchedulerEvent(
+        attId2, RMAppAttemptState.FINISHED, false);
+
+    scheduler.handle(appRemovedEvent1);
+    scheduler.update();
+    scheduler.handle(updateEvent);
+
+    // App 4 should not be running
+    assertEquals(0, scheduler.getSchedulerApp(attId4).getLiveContainers().size());
+
+    // Now remove app 3
+    appRemovedEvent1 = new AppAttemptRemovedSchedulerEvent(
+        attId3, RMAppAttemptState.FINISHED, false);
+
+    scheduler.handle(appRemovedEvent1);
+    scheduler.update();
+    scheduler.handle(updateEvent);
+
+    // App 4 should be running now
+    assertEquals(1, scheduler.getSchedulerApp(attId4).getLiveContainers().size());
+  }
+
   @Test (timeout = 5000)
   public void testReservationWhileMultiplePriorities() throws IOException {
     scheduler.init(conf);


[23/50] [abbrv] hadoop git commit: YARN-1809. Synchronize RM and TimeLineServer Web-UIs. Contributed by Zhijie Shen and Xuan Gong

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java
deleted file mode 100644
index 935be61..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-package org.apache.hadoop.yarn.server.resourcemanager.webapp;
-
-import static org.apache.hadoop.yarn.util.StringHelper.join;
-import static org.apache.hadoop.yarn.webapp.YarnWebParams.APP_STATE;
-import static org.apache.hadoop.yarn.webapp.view.JQueryUI.C_PROGRESSBAR;
-import static org.apache.hadoop.yarn.webapp.view.JQueryUI.C_PROGRESSBAR_VALUE;
-
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.concurrent.ConcurrentMap;
-
-import org.apache.commons.lang.StringEscapeUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
-import org.apache.hadoop.yarn.api.records.YarnApplicationState;
-import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
-import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
-import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
-
-import com.google.inject.Inject;
-
-class AppsBlock extends HtmlBlock {
-  final ConcurrentMap<ApplicationId, RMApp> apps;
-  private final Configuration conf;
-  final ResourceManager rm;
-  @Inject
-  AppsBlock(ResourceManager rm, ViewContext ctx, Configuration conf) {
-    super(ctx);
-    apps = rm.getRMContext().getRMApps();
-    this.conf = conf;
-    this.rm = rm;
-  }
-
-  @Override public void render(Block html) {
-    TBODY<TABLE<Hamlet>> tbody = html.
-      table("#apps").
-        thead().
-          tr().
-            th(".id", "ID").
-            th(".user", "User").
-            th(".name", "Name").
-            th(".type", "Application Type").
-            th(".queue", "Queue").
-            th(".starttime", "StartTime").
-            th(".finishtime", "FinishTime").
-            th(".state", "YarnApplicationState").
-            th(".finalstatus", "FinalStatus").
-            th(".progress", "Progress").
-            th(".ui", "Tracking UI")._()._().
-        tbody();
-    Collection<YarnApplicationState> reqAppStates = null;
-    String reqStateString = $(APP_STATE);
-    if (reqStateString != null && !reqStateString.isEmpty()) {
-      String[] appStateStrings = reqStateString.split(",");
-      reqAppStates = new HashSet<YarnApplicationState>(appStateStrings.length);
-      for(String stateString : appStateStrings) {
-        reqAppStates.add(YarnApplicationState.valueOf(stateString));
-      }
-    }
-    StringBuilder appsTableData = new StringBuilder("[\n");
-    for (RMApp app : apps.values()) {
-      if (reqAppStates != null && !reqAppStates.contains(app.createApplicationState())) {
-        continue;
-      }
-      AppInfo appInfo = new AppInfo(rm, app, true, WebAppUtils.getHttpSchemePrefix(conf));
-      String percent = String.format("%.1f", appInfo.getProgress());
-      //AppID numerical value parsed by parseHadoopID in yarn.dt.plugins.js
-      appsTableData.append("[\"<a href='")
-      .append(url("app", appInfo.getAppId())).append("'>")
-      .append(appInfo.getAppId()).append("</a>\",\"")
-      .append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(
-        appInfo.getUser()))).append("\",\"")
-      .append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(
-        appInfo.getName()))).append("\",\"")
-      .append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(
-        appInfo.getApplicationType()))).append("\",\"")
-      .append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(
-        appInfo.getQueue()))).append("\",\"")
-      .append(appInfo.getStartTime()).append("\",\"")
-      .append(appInfo.getFinishTime()).append("\",\"")
-      .append(appInfo.getState()).append("\",\"")
-      .append(appInfo.getFinalStatus() == FinalApplicationStatus.UNDEFINED ?
-          "N/A" : appInfo.getFinalStatus()).append("\",\"")
-      // Progress bar
-      .append("<br title='").append(percent)
-      .append("'> <div class='").append(C_PROGRESSBAR).append("' title='")
-      .append(join(percent, '%')).append("'> ").append("<div class='")
-      .append(C_PROGRESSBAR_VALUE).append("' style='")
-      .append(join("width:", percent, '%')).append("'> </div> </div>")
-      .append("\",\"<a href='");
-
-      String trackingURL =
-        !appInfo.isTrackingUrlReady()? "#" : appInfo.getTrackingUrlPretty();
-      
-      appsTableData.append(trackingURL).append("'>")
-      .append(appInfo.getTrackingUI()).append("</a>\"],\n");
-
-    }
-    if(appsTableData.charAt(appsTableData.length() - 2) == ',') {
-      appsTableData.delete(appsTableData.length()-2, appsTableData.length()-1);
-    }
-    appsTableData.append("]");
-    html.script().$type("text/javascript").
-    _("var appsTableData=" + appsTableData)._();
-
-    tbody._()._();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlockWithMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlockWithMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlockWithMetrics.java
index 6d461f6..cb0836a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlockWithMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlockWithMetrics.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.webapp;
 
+import org.apache.hadoop.yarn.server.webapp.AppsBlock;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
index 83df72b..028bb31 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedule
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerLeafQueueInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerQueueInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceInfo;
+import org.apache.hadoop.yarn.server.webapp.AppsBlock;
 import org.apache.hadoop.yarn.webapp.ResponseInfo;
 import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ContainerPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ContainerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ContainerPage.java
new file mode 100644
index 0000000..b8cd1ad
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ContainerPage.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import static org.apache.hadoop.yarn.util.StringHelper.join;
+
+import org.apache.hadoop.yarn.server.webapp.ContainerBlock;
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.YarnWebParams;
+
+
+public class ContainerPage extends RmView {
+
+  @Override
+  protected void preHead(Page.HTML<_> html) {
+    commonPreHead(html);
+
+    String containerId = $(YarnWebParams.CONTAINER_ID);
+    set(TITLE, containerId.isEmpty() ? "Bad request: missing container ID"
+        : join("Container ", $(YarnWebParams.CONTAINER_ID)));
+  }
+
+  @Override
+  protected Class<? extends SubView> content() {
+    return ContainerBlock.class;
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java
index e05987b..1c8828c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java
@@ -23,6 +23,7 @@ import static org.apache.hadoop.yarn.util.StringHelper.join;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FifoSchedulerInfo;
+import org.apache.hadoop.yarn.server.webapp.AppsBlock;
 import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java
index 8c54f4e..97ab872 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedule
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FairSchedulerInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FairSchedulerLeafQueueInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FairSchedulerQueueInfo;
+import org.apache.hadoop.yarn.server.webapp.WebPageUtils;
 import org.apache.hadoop.yarn.webapp.ResponseInfo;
 import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
@@ -234,6 +235,11 @@ public class FairSchedulerPage extends RmView {
     return QueuesBlock.class;
   }
 
+  @Override
+  protected String initAppsTable() {
+    return WebPageUtils.appsTableInit(true);
+  }
+
   static String percent(float f) {
     return String.format("%.1f%%", f * 100);
   }
@@ -245,19 +251,4 @@ public class FairSchedulerPage extends RmView {
   static String left(float f) {
     return String.format("left:%.1f%%", f * 100);
   }
-  
-  @Override
-  protected String getAppsTableColumnDefs() {
-    StringBuilder sb = new StringBuilder();
-    return sb
-      .append("[\n")
-      .append("{'sType':'numeric', 'aTargets': [0]")
-      .append(", 'mRender': parseHadoopID }")
-
-      .append("\n, {'sType':'numeric', 'aTargets': [6, 7]")
-      .append(", 'mRender': renderHadoopDate }")
-
-      .append("\n, {'sType':'numeric', bSearchable:false, 'aTargets': [9]")
-      .append(", 'mRender': parseHadoopProgress }]").toString();
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java
index c0e6834..4189053 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java
@@ -23,6 +23,7 @@ import static org.apache.hadoop.yarn.util.StringHelper.pajoin;
 import java.net.InetSocketAddress;
 
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.util.RMHAUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
@@ -53,6 +54,7 @@ public class RMWebApp extends WebApp implements YarnWebParams {
 
     if (rm != null) {
       bind(ResourceManager.class).toInstance(rm);
+      bind(ApplicationBaseProtocol.class).toInstance(rm.getClientRMService());
     }
     route("/", RmController.class);
     route(pajoin("/nodes", NODE_STATE), RmController.class, "nodes");
@@ -62,6 +64,9 @@ public class RMWebApp extends WebApp implements YarnWebParams {
     route("/scheduler", RmController.class, "scheduler");
     route(pajoin("/queue", QUEUE_NAME), RmController.class, "queue");
     route("/nodelabels", RmController.class, "nodelabels");
+    route(pajoin("/appattempt", APPLICATION_ATTEMPT_ID), RmController.class,
+      "appattempt");
+    route(pajoin("/container", CONTAINER_ID), RmController.class, "container");
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java
index 972432b..cc5232e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java
@@ -54,6 +54,14 @@ public class RmController extends Controller {
     render(AppPage.class);
   }
 
+  public void appattempt() {
+    render(AppAttemptPage.class);
+  }
+
+  public void container() {
+    render(ContainerPage.class);
+  }
+
   public void nodes() {
     render(NodesPage.class);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java
index 769c4da..1a437f8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.webapp;
 
+import org.apache.hadoop.yarn.server.webapp.WebPageUtils;
 import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.view.TwoColumnLayout;
 
@@ -35,7 +36,7 @@ public class RmView extends TwoColumnLayout {
   protected void preHead(Page.HTML<_> html) {
     commonPreHead(html);
     set(DATATABLES_ID, "apps");
-    set(initID(DATATABLES, "apps"), appsTableInit());
+    set(initID(DATATABLES, "apps"), initAppsTable());
     setTableStyles(html, "apps", ".queue {width:6em}", ".ui {width:8em}");
 
     // Set the correct title.
@@ -59,31 +60,7 @@ public class RmView extends TwoColumnLayout {
     return AppsBlockWithMetrics.class;
   }
 
-  private String appsTableInit() {
-    // id, user, name, queue, starttime, finishtime, state, status, progress, ui
-    return tableInit()
-      .append(", 'aaData': appsTableData")
-      .append(", bDeferRender: true")
-      .append(", bProcessing: true")
-
-      .append("\n, aoColumnDefs: ")
-      .append(getAppsTableColumnDefs())
-
-      // Sort by id upon page load
-      .append(", aaSorting: [[0, 'desc']]}").toString();
-  }
-  
-  protected String getAppsTableColumnDefs() {
-    StringBuilder sb = new StringBuilder();
-    return sb
-      .append("[\n")
-      .append("{'sType':'string', 'aTargets': [0]")
-      .append(", 'mRender': parseHadoopID }")
-
-      .append("\n, {'sType':'numeric', 'aTargets': [5, 6]")
-      .append(", 'mRender': renderHadoopDate }")
-
-      .append("\n, {'sType':'numeric', bSearchable:false, 'aTargets': [9]")
-      .append(", 'mRender': parseHadoopProgress }]").toString();
+  protected String initAppsTable() {
+    return WebPageUtils.appsTableInit();
   }
 }
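
The RmView hunk trades a private appsTableInit() for a protected initAppsTable() hook whose default delegates to WebPageUtils, so FairSchedulerPage (earlier in this commit) overrides the hook instead of duplicating DataTables column definitions. A sketch of the shape with stand-in bodies, not the real classes:

    class WebPageUtilsSketch {                 // stand-in for WebPageUtils
      static String appsTableInit() { return appsTableInit(false); }
      static String appsTableInit(boolean fairSchedulerPage) {
        return fairSchedulerPage ? "{fair-scheduler column defs}"
                                 : "{default column defs}";
      }
    }

    class RmViewSketch {
      protected String initAppsTable() {       // default used by most RM pages
        return WebPageUtilsSketch.appsTableInit();
      }
    }

    class FairSchedulerPageSketch extends RmViewSketch {
      @Override
      protected String initAppsTable() {       // fair-scheduler-specific table
        return WebPageUtilsSketch.appsTableInit(true);
      }
    }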

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestAppPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestAppPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestAppPage.java
index 9732c19..8c7b14d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestAppPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestAppPage.java
@@ -23,6 +23,7 @@ import static org.mockito.Mockito.when;
 
 import java.io.IOException;
 
+import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -32,6 +33,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
+import org.apache.hadoop.yarn.server.webapp.AppBlock;
 import org.apache.hadoop.yarn.webapp.YarnWebParams;
 import org.apache.hadoop.yarn.webapp.test.WebAppTests;
 import org.junit.Test;
@@ -75,8 +77,10 @@ public class TestAppPage {
               @Override
               public void configure(Binder binder) {
                 try {
-                  binder.bind(ResourceManager.class).toInstance(
-                      TestRMWebApp.mockRm(rmContext));
+                  ResourceManager rm = TestRMWebApp.mockRm(rmContext);
+                  binder.bind(ResourceManager.class).toInstance(rm);
+                  binder.bind(ApplicationBaseProtocol.class).toInstance(
+                    rm.getClientRMService());
                 } catch (IOException e) {
                   throw new IllegalStateException(e);
                 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
index fb1e61d..481a53b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
@@ -21,19 +21,30 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp;
 import static org.apache.hadoop.yarn.server.resourcemanager.MockNodes.newResource;
 import static org.apache.hadoop.yarn.webapp.Params.TITLE;
 import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.ConcurrentMap;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.resourcemanager.ClientRMService;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
@@ -54,6 +65,7 @@ import org.apache.hadoop.yarn.util.StringHelper;
 import org.apache.hadoop.yarn.webapp.WebApps;
 import org.apache.hadoop.yarn.webapp.YarnWebParams;
 import org.apache.hadoop.yarn.webapp.test.WebAppTests;
+import org.junit.Assert;
 import org.junit.Test;
 
 import com.google.common.collect.Maps;
@@ -87,7 +99,10 @@ public class TestRMWebApp {
       @Override
       public void configure(Binder binder) {
         try {
-          binder.bind(ResourceManager.class).toInstance(mockRm(3, 1, 2, 8*GiB));
+          ResourceManager mockRm = mockRm(3, 1, 2, 8*GiB);
+          binder.bind(ResourceManager.class).toInstance(mockRm);
+          binder.bind(ApplicationBaseProtocol.class)
+              .toInstance(mockRm.getClientRMService());
         } catch (IOException e) {
           throw new IllegalStateException(e);
         }
@@ -194,9 +209,11 @@ public class TestRMWebApp {
     ResourceManager rm = mock(ResourceManager.class);
     ResourceScheduler rs = mockCapacityScheduler();
     ApplicationACLsManager aclMgr = mockAppACLsManager();
+    ClientRMService clientRMService = mockClientRMService(rmContext);
     when(rm.getResourceScheduler()).thenReturn(rs);
     when(rm.getRMContext()).thenReturn(rmContext);
     when(rm.getApplicationACLsManager()).thenReturn(aclMgr);
+    when(rm.getClientRMService()).thenReturn(clientRMService);
     return rm;
   }
 
@@ -222,6 +239,35 @@ public class TestRMWebApp {
     return new ApplicationACLsManager(conf);
   }
 
+  public static ClientRMService mockClientRMService(RMContext rmContext) {
+    ClientRMService clientRMService = mock(ClientRMService.class);
+    List<ApplicationReport> appReports = new ArrayList<ApplicationReport>();
+    for (RMApp app : rmContext.getRMApps().values()) {
+      ApplicationReport appReport =
+          ApplicationReport.newInstance(
+              app.getApplicationId(), (ApplicationAttemptId) null,
+              app.getUser(), app.getQueue(),
+              app.getName(), (String) null, 0, (Token) null,
+              app.createApplicationState(),
+              app.getDiagnostics().toString(), (String) null,
+              app.getStartTime(), app.getFinishTime(),
+              app.getFinalApplicationStatus(),
+              (ApplicationResourceUsageReport) null, app.getTrackingUrl(),
+              app.getProgress(), app.getApplicationType(), (Token) null);
+      appReports.add(appReport);
+    }
+    GetApplicationsResponse response = mock(GetApplicationsResponse.class);
+    when(response.getApplicationList()).thenReturn(appReports);
+    try {
+      when(clientRMService.getApplications(any(GetApplicationsRequest.class)))
+          .thenReturn(response);
+    } catch (YarnException e) {
+      Assert.fail("Exception is not expteced.");
+    }
+    return clientRMService;
+  }
+
+
   static void setupQueueConfiguration(CapacitySchedulerConfiguration conf) {
     // Define top-level queues
     conf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {"a", "b", "c"});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java
index b850a5e..06fa0d4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java
@@ -24,10 +24,12 @@ import com.google.inject.Injector;
 import com.google.inject.Module;
 
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.server.resourcemanager.ClientRMService;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
@@ -73,7 +75,8 @@ public class TestRMWebAppFairScheduler {
                   mockRm(rmContext);
               binder.bind(ResourceManager.class).toInstance
                   (mockRmWithFairScheduler);
-
+              binder.bind(ApplicationBaseProtocol.class).toInstance(
+                mockRmWithFairScheduler.getClientRMService());
             } catch (IOException e) {
               throw new IllegalStateException(e);
             }
@@ -112,6 +115,8 @@ public class TestRMWebAppFairScheduler {
                   mockRmWithApps(rmContext);
               binder.bind(ResourceManager.class).toInstance
                   (mockRmWithFairScheduler);
+              binder.bind(ApplicationBaseProtocol.class).toInstance(
+                  mockRmWithFairScheduler.getClientRMService());
 
             } catch (IOException e) {
               throw new IllegalStateException(e);
@@ -168,8 +173,10 @@ public class TestRMWebAppFairScheduler {
       IOException {
     ResourceManager rm = mock(ResourceManager.class);
     ResourceScheduler rs = mockFairScheduler();
+    ClientRMService clientRMService = mockClientRMService(rmContext);
     when(rm.getResourceScheduler()).thenReturn(rs);
     when(rm.getRMContext()).thenReturn(rmContext);
+    when(rm.getClientRMService()).thenReturn(clientRMService);
     return rm;
   }
 
@@ -188,8 +195,10 @@ public class TestRMWebAppFairScheduler {
       IOException {
     ResourceManager rm = mock(ResourceManager.class);
     ResourceScheduler rs =  mockFairSchedulerWithoutApps(rmContext);
+    ClientRMService clientRMService = mockClientRMService(rmContext);
     when(rm.getResourceScheduler()).thenReturn(rs);
     when(rm.getRMContext()).thenReturn(rmContext);
+    when(rm.getClientRMService()).thenReturn(clientRMService);
     return rm;
   }
 
@@ -213,4 +222,7 @@ public class TestRMWebAppFairScheduler {
     return fs;
   }
 
+  public static ClientRMService mockClientRMService(RMContext rmContext) {
+    return mock(ClientRMService.class);
+  }
 }


[13/50] [abbrv] hadoop git commit: HDFS-7869. Inconsistency in the return information while performing rolling upgrade (Contributed by J.Andreina)

Posted by ji...@apache.org.
HDFS-7869. Inconsistency in the return information while performing rolling upgrade (Contributed by J.Andreina)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/871bd4e6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/871bd4e6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/871bd4e6

Branch: refs/heads/HDFS-7285
Commit: 871bd4e688b83d63296b604d92d99c781a8977ff
Parents: 7814f50
Author: Vinayakumar B <vi...@apache.org>
Authored: Wed Mar 4 14:38:38 2015 +0530
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:23 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                    | 3 +++
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java   | 6 +++---
 .../apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java  | 3 +--
 .../src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java   | 2 +-
 .../test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java   | 4 ++--
 5 files changed, 10 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/871bd4e6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7ff3c78..2037973 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1083,6 +1083,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7682. {{DistributedFileSystem#getFileChecksum}} of a snapshotted file
     includes non-snapshotted content. (Charles Lamb via atm)
 
+    HDFS-7869. Inconsistency in the return information while performing rolling
+    upgrade (J.Andreina via vinayakumarb)
+
     BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
       HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/871bd4e6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index bd2a203..621ebef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -7500,7 +7500,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     }
   }
 
-  void finalizeRollingUpgrade() throws IOException {
+  RollingUpgradeInfo finalizeRollingUpgrade() throws IOException {
     checkSuperuserPrivilege();
     checkOperation(OperationCategory.WRITE);
     writeLock();
@@ -7508,7 +7508,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     try {
       checkOperation(OperationCategory.WRITE);
       if (!isRollingUpgrade()) {
-        return;
+        return null;
       }
       checkNameNodeSafeMode("Failed to finalize rolling upgrade");
 
@@ -7533,7 +7533,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     if (auditLog.isInfoEnabled() && isExternalInvocation()) {
       logAuditEvent(true, "finalizeRollingUpgrade", null, null, null);
     }
-    return;
+    return returnInfo;
   }
 
   RollingUpgradeInfo finalizeRollingUpgradeInternal(long finalizeTime)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/871bd4e6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 9ccdb40..f20fb35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -1145,8 +1145,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
     case PREPARE:
       return namesystem.startRollingUpgrade();
     case FINALIZE:
-      namesystem.finalizeRollingUpgrade();
-      return null;
+      return namesystem.finalizeRollingUpgrade();
     default:
       throw new UnsupportedActionException(action + " is not yet supported.");
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/871bd4e6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index abbd46b..e80b4c0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -341,7 +341,7 @@ public class DFSAdmin extends FsShell {
     private static void printMessage(RollingUpgradeInfo info,
         PrintStream out) {
       if (info != null && info.isStarted()) {
-        if (!info.createdRollbackImages()) {
+        if (!info.createdRollbackImages() && !info.isFinalized()) {
           out.println(
               "Preparing for upgrade. Data is being saved for rollback."
               + "\nRun \"dfsadmin -rollingUpgrade query\" to check the status"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/871bd4e6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
index 9746049..8baebd8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
@@ -244,7 +244,7 @@ public class TestRollingUpgrade {
       //finalize rolling upgrade
       final RollingUpgradeInfo finalize = dfs2.rollingUpgrade(
           RollingUpgradeAction.FINALIZE);
-      Assert.assertNull(finalize);
+      Assert.assertTrue(finalize.isFinalized());
 
       LOG.info("RESTART cluster 2 with regular startup option");
       cluster2.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
@@ -405,7 +405,7 @@ public class TestRollingUpgrade {
       Assert.assertTrue(fsimage.hasRollbackFSImage());
 
       info = dfs.rollingUpgrade(RollingUpgradeAction.FINALIZE);
-      Assert.assertNull(info);
+      Assert.assertTrue(info.isFinalized());
       Assert.assertTrue(dfs.exists(foo));
 
       // Once finalized, there should be no more fsimage for rollbacks.


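A hedged sketch of the client-visible effect of this change (conf is assumed to be an existing Configuration): the FINALIZE action now returns the upgrade status instead of null, so callers can confirm completion directly.

    // Before this fix, rollingUpgrade(FINALIZE) always returned null; the
    // null check below covers the case where no rolling upgrade is active.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    RollingUpgradeInfo info = dfs.rollingUpgrade(RollingUpgradeAction.FINALIZE);
    if (info != null && info.isFinalized()) {
      System.out.println("Rolling upgrade finalized: " + info);
    }
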
[07/50] [abbrv] hadoop git commit: MAPREDUCE-5657. Fix Javadoc errors caused by incorrect or illegal tags in doc comments. Contributed by Akira AJISAKA.

Posted by ji...@apache.org.
MAPREDUCE-5657. Fix Javadoc errors caused by incorrect or illegal tags in doc comments. Contributed by Akira AJISAKA.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4a3ef07f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4a3ef07f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4a3ef07f

Branch: refs/heads/HDFS-7285
Commit: 4a3ef07f4a3dbbb56eedc368a0123e02bc803850
Parents: e208eee
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Tue Mar 3 18:06:26 2015 +0900
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:22 2015 -0700

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt            |  3 ++
 .../hadoop/mapred/TaskAttemptListenerImpl.java  |  4 +-
 .../hadoop/mapreduce/v2/app/JobEndNotifier.java |  1 -
 .../apache/hadoop/mapreduce/v2/util/MRApps.java |  2 +-
 .../hadoop/filecache/DistributedCache.java      |  2 +-
 .../org/apache/hadoop/mapred/ClusterStatus.java |  4 +-
 .../apache/hadoop/mapred/FileOutputFormat.java  |  2 +-
 .../java/org/apache/hadoop/mapred/IFile.java    |  2 +-
 .../apache/hadoop/mapred/JobACLsManager.java    |  1 -
 .../org/apache/hadoop/mapred/JobClient.java     |  8 ++--
 .../java/org/apache/hadoop/mapred/JobConf.java  | 49 +++++++++-----------
 .../java/org/apache/hadoop/mapred/Mapper.java   |  2 +-
 .../org/apache/hadoop/mapred/QueueManager.java  | 30 ++++++------
 .../org/apache/hadoop/mapred/RecordReader.java  |  2 +-
 .../java/org/apache/hadoop/mapred/Reducer.java  | 14 +++---
 .../hadoop/mapred/TaskUmbilicalProtocol.java    |  1 -
 .../apache/hadoop/mapred/lib/ChainMapper.java   | 40 ++++++++--------
 .../apache/hadoop/mapred/lib/ChainReducer.java  | 44 +++++++++---------
 .../hadoop/mapred/lib/MultipleOutputs.java      | 29 +++++-------
 .../hadoop/mapred/lib/TokenCountMapper.java     |  2 +-
 .../lib/aggregate/ValueAggregatorJob.java       |  2 +-
 .../lib/aggregate/ValueAggregatorReducer.java   |  3 +-
 .../hadoop/mapred/lib/db/DBInputFormat.java     |  4 +-
 .../org/apache/hadoop/mapreduce/Cluster.java    |  1 +
 .../apache/hadoop/mapreduce/ClusterMetrics.java |  6 +--
 .../apache/hadoop/mapreduce/CryptoUtils.java    | 10 ++--
 .../java/org/apache/hadoop/mapreduce/Job.java   |  2 +-
 .../org/apache/hadoop/mapreduce/JobContext.java |  2 -
 .../hadoop/mapreduce/JobSubmissionFiles.java    |  2 +-
 .../org/apache/hadoop/mapreduce/Mapper.java     |  9 ++--
 .../org/apache/hadoop/mapreduce/Reducer.java    | 12 ++---
 .../mapreduce/filecache/DistributedCache.java   |  5 +-
 .../lib/aggregate/ValueAggregatorJob.java       |  2 +-
 .../hadoop/mapreduce/lib/chain/Chain.java       |  4 +-
 .../hadoop/mapreduce/lib/chain/ChainMapper.java | 10 ++--
 .../mapreduce/lib/chain/ChainReducer.java       | 14 +++---
 .../hadoop/mapreduce/lib/db/DBInputFormat.java  |  2 +-
 .../hadoop/mapreduce/lib/db/DBWritable.java     |  2 +-
 .../mapreduce/lib/join/TupleWritable.java       |  2 +-
 .../mapreduce/lib/map/MultithreadedMapper.java  |  6 +--
 .../mapreduce/lib/output/FileOutputFormat.java  |  2 +-
 .../mapreduce/lib/output/MultipleOutputs.java   | 11 ++---
 .../lib/partition/BinaryPartitioner.java        |  2 +-
 .../hadoop/mapreduce/task/JobContextImpl.java   |  2 -
 .../hadoop/mapreduce/RandomTextWriter.java      |  4 +-
 .../apache/hadoop/mapreduce/RandomWriter.java   |  5 +-
 .../hadoop/examples/MultiFileWordCount.java     |  2 +-
 .../apache/hadoop/examples/QuasiMonteCarlo.java |  4 +-
 .../hadoop/examples/RandomTextWriter.java       |  4 +-
 .../apache/hadoop/examples/RandomWriter.java    |  5 +-
 .../apache/hadoop/examples/SecondarySort.java   |  2 +-
 .../org/apache/hadoop/examples/pi/DistBbp.java  |  2 +-
 .../apache/hadoop/examples/pi/math/Modular.java |  2 +-
 .../hadoop/examples/terasort/GenSort.java       |  2 +-
 .../org/apache/hadoop/tools/CopyListing.java    | 14 +++---
 .../java/org/apache/hadoop/tools/DistCp.java    |  4 +-
 .../apache/hadoop/tools/DistCpOptionSwitch.java |  2 +-
 .../org/apache/hadoop/tools/OptionsParser.java  |  2 +-
 .../hadoop/tools/mapred/CopyCommitter.java      |  4 +-
 .../apache/hadoop/tools/mapred/CopyMapper.java  |  5 +-
 .../hadoop/tools/mapred/CopyOutputFormat.java   |  4 +-
 .../tools/mapred/RetriableFileCopyCommand.java  |  3 +-
 .../tools/mapred/UniformSizeInputFormat.java    |  4 +-
 .../tools/mapred/lib/DynamicInputFormat.java    |  4 +-
 .../tools/mapred/lib/DynamicRecordReader.java   | 12 ++---
 .../apache/hadoop/tools/util/DistCpUtils.java   |  2 +-
 .../hadoop/tools/util/RetriableCommand.java     |  2 +-
 .../hadoop/tools/util/ThrottledInputStream.java |  8 ++--
 .../java/org/apache/hadoop/tools/Logalyzer.java |  4 +-
 .../ResourceUsageEmulatorPlugin.java            |  2 +-
 .../fs/swift/http/RestClientBindings.java       |  6 +--
 .../hadoop/fs/swift/http/SwiftRestClient.java   |  6 +--
 .../fs/swift/snative/SwiftNativeFileSystem.java |  6 +--
 .../snative/SwiftNativeFileSystemStore.java     |  6 +--
 .../hadoop/fs/swift/util/SwiftTestUtils.java    |  2 +-
 .../apache/hadoop/tools/rumen/InputDemuxer.java |  4 +-
 .../util/MapReduceJobPropertiesParser.java      |  5 +-
 .../apache/hadoop/tools/rumen/package-info.java |  8 ++--
 78 files changed, 249 insertions(+), 261 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 5fd7d30..5524b14 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -181,6 +181,9 @@ Trunk (Unreleased)
     MAPREDUCE-6234. TestHighRamJob fails due to the change in MAPREDUCE-5785. 
     (Masatake Iwasaki via kasha)
 
+    MAPREDUCE-5657. Fix Javadoc errors caused by incorrect or illegal tags in doc
+    comments. (Akira AJISAKA via ozawa)
+
   BREAKDOWN OF MAPREDUCE-2841 (NATIVE TASK) SUBTASKS
 
     MAPREDUCE-5985. native-task: Fix build on macosx. Contributed by

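The recurring pattern in the hunks below: a strict javadoc run (Java 8's doclint is the likely trigger) rejects self-closing tags such as <p/> and <br/>, stray closing tags, and raw '<', '>' or '&' characters in doc comments. A hedged before/after sketch on an illustrative comment:

    // Rejected by a strict javadoc run (illustrative):
    /**
     * Emits <key, value> pairs for each map & reduce task.<br/>
     * <p/>
     */

    // Accepted, after the same fixes applied throughout this commit:
    /**
     * Emits &lt;key, value&gt; pairs for each map and reduce task.<br>
     * <p>
     */
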
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
index 5f39edd..c8f2427 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
@@ -174,7 +174,7 @@ public class TaskAttemptListenerImpl extends CompositeService
   /**
    * Child checking whether it can commit.
    * 
-   * <br/>
+   * <br>
    * Commit is a two-phased protocol. First the attempt informs the
    * ApplicationMaster that it is
    * {@link #commitPending(TaskAttemptID, TaskStatus)}. Then it repeatedly polls
@@ -208,7 +208,7 @@ public class TaskAttemptListenerImpl extends CompositeService
    * TaskAttempt is reporting that it is in commit_pending and it is waiting for
    * the commit Response
    * 
-   * <br/>
+   * <br>
    * Commit it a two-phased protocol. First the attempt informs the
    * ApplicationMaster that it is
    * {@link #commitPending(TaskAttemptID, TaskStatus)}. Then it repeatedly polls

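A hedged sketch of the two-phase flow those comments describe, from the task side (handle names, the context type, and the poll interval are illustrative; retry and error handling are elided):

    // Phase 1: announce commit_pending; phase 2: poll until the AM approves.
    void commitWhenAllowed(TaskUmbilicalProtocol umbilical, TaskAttemptID taskId,
        TaskStatus status, OutputCommitter committer, TaskAttemptContext ctx)
        throws IOException, InterruptedException {
      umbilical.commitPending(taskId, status);
      while (!umbilical.canCommit(taskId)) {
        Thread.sleep(1000);              // illustrative poll interval
      }
      committer.commitTask(ctx);         // output is now safe to promote
    }
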
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/JobEndNotifier.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/JobEndNotifier.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/JobEndNotifier.java
index 981e6ff..05bb40b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/JobEndNotifier.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/JobEndNotifier.java
@@ -44,7 +44,6 @@ import org.mortbay.log.Log;
  * proxy if needed</li><li>
  * The URL may contain sentinels which will be replaced by jobId and jobStatus 
  * (eg. SUCCEEDED/KILLED/FAILED) </li> </ul>
- * </p>
  */
 public class JobEndNotifier implements Configurable {
   private static final String JOB_ID = "$jobId";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
index 1520fc8..e4b43b5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
@@ -345,7 +345,7 @@ public class MRApps extends Apps {
    * {@link MRJobConfig#MAPREDUCE_JOB_CLASSLOADER} is set to true, and
    * the APP_CLASSPATH environment variable is set.
    * @param conf
-   * @returns the created job classloader, or null if the job classloader is not
+   * @return the created job classloader, or null if the job classloader is not
    * enabled or the APP_CLASSPATH environment variable is not set
    * @throws IOException
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/filecache/DistributedCache.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/filecache/DistributedCache.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/filecache/DistributedCache.java
index 370d67d..0783eb5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/filecache/DistributedCache.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/filecache/DistributedCache.java
@@ -113,7 +113,7 @@ import org.apache.hadoop.mapreduce.MRJobConfig;
  *       }
  *     }
  *     
- * </pre></blockquote></p>
+ * </pre></blockquote>
  *
  * It is also very common to use the DistributedCache by using
  * {@link org.apache.hadoop.util.GenericOptionsParser}.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ClusterStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ClusterStatus.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ClusterStatus.java
index 8b56787..904897b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ClusterStatus.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ClusterStatus.java
@@ -48,7 +48,7 @@ import org.apache.hadoop.util.StringInterner;
  *   Task capacity of the cluster. 
  *   </li>
  *   <li>
- *   The number of currently running map & reduce tasks.
+ *   The number of currently running map and reduce tasks.
  *   </li>
  *   <li>
  *   State of the <code>JobTracker</code>.
@@ -56,7 +56,7 @@ import org.apache.hadoop.util.StringInterner;
  *   <li>
  *   Details regarding black listed trackers.
  *   </li>
- * </ol></p>
+ * </ol>
  * 
  * <p>Clients can query for the latest <code>ClusterStatus</code>, via 
  * {@link JobClient#getClusterStatus()}.</p>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileOutputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileOutputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileOutputFormat.java
index 721c8a8..821c1e8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileOutputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileOutputFormat.java
@@ -179,7 +179,7 @@ public abstract class FileOutputFormat<K, V> implements OutputFormat<K, V> {
    *  Get the {@link Path} to the task's temporary output directory 
    *  for the map-reduce job
    *  
-   * <h4 id="SideEffectFiles">Tasks' Side-Effect Files</h4>
+   * <b id="SideEffectFiles">Tasks' Side-Effect Files</b>
    * 
    * <p><i>Note:</i> The following is valid only if the {@link OutputCommitter}
    *  is {@link FileOutputCommitter}. If <code>OutputCommitter</code> is not 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IFile.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IFile.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IFile.java
index 30ebd6b..32e07e7 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IFile.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IFile.java
@@ -47,7 +47,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
 /**
- * <code>IFile</code> is the simple <key-len, value-len, key, value> format
+ * <code>IFile</code> is the simple &lt;key-len, value-len, key, value&gt; format
  * for the intermediate map-outputs in Map-Reduce.
  *
  * There is a <code>Writer</code> to write out map-outputs in this format and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobACLsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobACLsManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobACLsManager.java
index 37633ab..0dbbe5a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobACLsManager.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobACLsManager.java
@@ -101,7 +101,6 @@ public class JobACLsManager {
    * @param jobOperation
    * @param jobOwner
    * @param jobACL
-   * @throws AccessControlException
    */
   public boolean checkAccess(UserGroupInformation callerUGI,
       JobACL jobOperation, String jobOwner, AccessControlList jobACL) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
index 89a966e..e91fbfe 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
@@ -79,7 +79,7 @@ import org.apache.hadoop.util.ToolRunner;
  *   Submitting the job to the cluster and optionally monitoring
  *   it's status.
  *   </li>
- * </ol></p>
+ * </ol>
  *  
  * Normally the user creates the application, describes various facets of the
  * job via {@link JobConf} and then uses the <code>JobClient</code> to submit 
@@ -101,9 +101,9 @@ import org.apache.hadoop.util.ToolRunner;
  *
  *     // Submit the job, then poll for progress until the job is complete
  *     JobClient.runJob(job);
- * </pre></blockquote></p>
+ * </pre></blockquote>
  * 
- * <h4 id="JobControl">Job Control</h4>
+ * <b id="JobControl">Job Control</b>
  * 
  * <p>At times clients would chain map-reduce jobs to accomplish complex tasks 
  * which cannot be done via a single map-reduce job. This is fairly easy since 
@@ -127,7 +127,7 @@ import org.apache.hadoop.util.ToolRunner;
  *   {@link JobConf#setJobEndNotificationURI(String)} : setup a notification
  *   on job-completion, thus avoiding polling.
  *   </li>
- * </ol></p>
+ * </ol>
  * 
  * @see JobConf
  * @see ClusterStatus

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
index 315c829..c388bda 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
@@ -74,7 +74,7 @@ import org.apache.log4j.Level;
  *   more complex for the user to control finely
  *   (e.g. {@link #setNumMapTasks(int)}).
  *   </li>
- * </ol></p>
+ * </ol>
  * 
  * <p><code>JobConf</code> typically specifies the {@link Mapper}, combiner 
  * (if any), {@link Partitioner}, {@link Reducer}, {@link InputFormat} and 
@@ -105,7 +105,7 @@ import org.apache.log4j.Level;
  *     
  *     job.setInputFormat(SequenceFileInputFormat.class);
  *     job.setOutputFormat(SequenceFileOutputFormat.class);
- * </pre></blockquote></p>
+ * </pre></blockquote>
  * 
  * @see JobClient
  * @see ClusterStatus
@@ -486,7 +486,7 @@ public class JobConf extends Configuration {
 
   /** A new map/reduce configuration where the behavior of reading from the
    * default resources can be turned off.
-   * <p/>
+   * <p>
    * If the parameter {@code loadDefaults} is false, the new instance
    * will not load resources from the default files.
    *
@@ -993,19 +993,19 @@ public class JobConf extends Configuration {
   /**
    * Set the user defined {@link RawComparator} comparator for
    * grouping keys in the input to the combiner.
-   * <p/>
+   *
    * <p>This comparator should be provided if the equivalence rules for keys
    * for sorting the intermediates are different from those for grouping keys
    * before each call to
    * {@link Reducer#reduce(Object, java.util.Iterator, OutputCollector, Reporter)}.</p>
-   * <p/>
+   *
    * <p>For key-value pairs (K1,V1) and (K2,V2), the values (V1, V2) are passed
    * in a single call to the reduce function if K1 and K2 compare as equal.</p>
-   * <p/>
+   *
    * <p>Since {@link #setOutputKeyComparatorClass(Class)} can be used to control
    * how keys are sorted, this can be used in conjunction to simulate
    * <i>secondary sort on values</i>.</p>
-   * <p/>
+   *
    * <p><i>Note</i>: This is not a guarantee of the combiner sort being
    * <i>stable</i> in any sense. (In any case, with the order of available
    * map-outputs to the combiner being non-deterministic, it wouldn't make
@@ -1210,7 +1210,7 @@ public class JobConf extends Configuration {
    *   <li> be side-effect free</li>
    *   <li> have the same input and output key types and the same input and 
    *        output value types</li>
-   * </ul></p>
+   * </ul>
    * 
    * <p>Typically the combiner is same as the <code>Reducer</code> for the  
    * job i.e. {@link #setReducerClass(Class)}.</p>
@@ -1309,7 +1309,7 @@ public class JobConf extends Configuration {
    * A custom {@link InputFormat} is typically used to accurately control 
    * the number of map tasks for the job.</p>
    * 
-   * <h4 id="NoOfMaps">How many maps?</h4>
+   * <b id="NoOfMaps">How many maps?</b>
    * 
    * <p>The number of maps is usually driven by the total size of the inputs 
    * i.e. total number of blocks of the input files.</p>
@@ -1350,7 +1350,7 @@ public class JobConf extends Configuration {
   /**
    * Set the requisite number of reduce tasks for this job.
    * 
-   * <h4 id="NoOfReduces">How many reduces?</h4>
+   * <b id="NoOfReduces">How many reduces?</b>
    * 
    * <p>The right number of reduces seems to be <code>0.95</code> or 
    * <code>1.75</code> multiplied by (&lt;<i>no. of nodes</i>&gt; * 
@@ -1370,7 +1370,7 @@ public class JobConf extends Configuration {
    * reserve a few reduce slots in the framework for speculative-tasks, failures
    * etc.</p> 
    *
-   * <h4 id="ReducerNone">Reducer NONE</h4>
+   * <b id="ReducerNone">Reducer NONE</b>
    * 
    * <p>It is legal to set the number of reduce-tasks to <code>zero</code>.</p>
    * 
@@ -1693,9 +1693,9 @@ public class JobConf extends Configuration {
    * given task's stdout, stderr, syslog, jobconf files as arguments.</p>
    * 
    * <p>The debug command, run on the node where the map failed, is:</p>
-   * <p><pre><blockquote> 
+   * <p><blockquote><pre>
    * $script $stdout $stderr $syslog $jobconf.
-   * </blockquote></pre></p>
+   * </pre></blockquote>
    * 
    * <p> The script file is distributed through {@link DistributedCache} 
    * APIs. The script needs to be symlinked. </p>
@@ -1705,7 +1705,7 @@ public class JobConf extends Configuration {
    * job.setMapDebugScript("./myscript");
    * DistributedCache.createSymlink(job);
    * DistributedCache.addCacheFile("/debug/scripts/myscript#myscript");
-   * </pre></blockquote></p>
+   * </pre></blockquote>
    * 
    * @param mDbgScript the script name
    */
@@ -1730,9 +1730,9 @@ public class JobConf extends Configuration {
    * is given task's stdout, stderr, syslog, jobconf files as arguments.</p>
    * 
    * <p>The debug command, run on the node where the map failed, is:</p>
-   * <p><pre><blockquote> 
+   * <p><blockquote><pre>
    * $script $stdout $stderr $syslog $jobconf.
-   * </blockquote></pre></p>
+   * </pre></blockquote>
    * 
    * <p> The script file is distributed through {@link DistributedCache} 
    * APIs. The script file needs to be symlinked </p>
@@ -1742,7 +1742,7 @@ public class JobConf extends Configuration {
    * job.setReduceDebugScript("./myscript");
    * DistributedCache.createSymlink(job);
    * DistributedCache.addCacheFile("/debug/scripts/myscript#myscript");
-   * </pre></blockquote></p>
+   * </pre></blockquote>
    * 
    * @param rDbgScript the script name
    */
@@ -1785,8 +1785,6 @@ public class JobConf extends Configuration {
    * 
    * @param uri the job end notification uri
    * @see JobStatus
-   * @see <a href="{@docRoot}/org/apache/hadoop/mapred/JobClient.html#
-   *       JobCompletionAndChaining">Job Completion and Chaining</a>
    */
   public void setJobEndNotificationURI(String uri) {
     set(JobContext.MR_JOB_END_NOTIFICATION_URL, uri);
@@ -1816,7 +1814,7 @@ public class JobConf extends Configuration {
    * 
    * If a value is specified in the configuration, it is returned.
    * Else, it returns {@link JobContext#DEFAULT_MAP_MEMORY_MB}.
-   * <p/>
+   * <p>
    * For backward compatibility, if the job configuration sets the
    * key {@link #MAPRED_TASK_MAXVMEM_PROPERTY} to a value different
    * from {@link #DISABLED_MEMORY_LIMIT}, that value will be used
@@ -1842,7 +1840,7 @@ public class JobConf extends Configuration {
    * 
    * If a value is specified in the configuration, it is returned.
    * Else, it returns {@link JobContext#DEFAULT_REDUCE_MEMORY_MB}.
-   * <p/>
+   * <p>
    * For backward compatibility, if the job configuration sets the
    * key {@link #MAPRED_TASK_MAXVMEM_PROPERTY} to a value different
    * from {@link #DISABLED_MEMORY_LIMIT}, that value will be used
@@ -1915,7 +1913,6 @@ public class JobConf extends Configuration {
    * 
    * @param my_class the class to find.
    * @return a jar file that contains the class, or null.
-   * @throws IOException
    */
   public static String findContainingJar(Class my_class) {
     return ClassUtil.findContainingJar(my_class);
@@ -1924,10 +1921,10 @@ public class JobConf extends Configuration {
   /**
    * Get the memory required to run a task of this job, in bytes. See
    * {@link #MAPRED_TASK_MAXVMEM_PROPERTY}
-   * <p/>
+   * <p>
    * This method is deprecated. Now, different memory limits can be
    * set for map and reduce tasks of a job, in MB. 
-   * <p/>
+   * <p>
    * For backward compatibility, if the job configuration sets the
    * key {@link #MAPRED_TASK_MAXVMEM_PROPERTY}, that value is returned. 
    * Otherwise, this method will return the larger of the values returned by 
@@ -1953,7 +1950,7 @@ public class JobConf extends Configuration {
   /**
    * Set the maximum amount of memory any task of this job can use. See
    * {@link #MAPRED_TASK_MAXVMEM_PROPERTY}
-   * <p/>
+   * <p>
    * mapred.task.maxvmem is split into
    * mapreduce.map.memory.mb
   * and mapreduce.reduce.memory.mb,mapred
@@ -2073,7 +2070,7 @@ public class JobConf extends Configuration {
 
   /**
    * Parse the Maximum heap size from the java opts as specified by the -Xmx option
-   * Format: -Xmx<size>[g|G|m|M|k|K]
+   * Format: -Xmx&lt;size&gt;[g|G|m|M|k|K]
    * @param javaOpts String to parse to read maximum heap size
    * @return Maximum heap size in MB or -1 if not specified
    */

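A hedged, self-contained sketch of parsing that format (this is not JobConf's actual implementation; treating a missing suffix as bytes is an assumption):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    class XmxParser {
      private static final Pattern XMX = Pattern.compile("-Xmx(\\d+)([gGmMkK]?)");

      /** Returns the -Xmx value in MB, or -1 if no -Xmx option is present. */
      static int maxHeapSizeMB(String javaOpts) {
        Matcher m = XMX.matcher(javaOpts);
        if (!m.find()) {
          return -1;
        }
        long size = Long.parseLong(m.group(1));
        switch (m.group(2).toLowerCase()) {
          case "g": return (int) (size * 1024);           // GB  -> MB
          case "m": return (int) size;                    // already MB
          case "k": return (int) (size / 1024);           // KB  -> MB
          default:  return (int) (size / (1024 * 1024));  // bytes -> MB
        }
      }
    }
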
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Mapper.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Mapper.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Mapper.java
index eaa6c2b..ac2c96d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Mapper.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Mapper.java
@@ -117,7 +117,7 @@ import org.apache.hadoop.io.compress.CompressionCodec;
  *         output.collect(key, val);
  *       }
  *     }
- * </pre></blockquote></p>
+ * </pre></blockquote>
  *
  * <p>Applications may write a custom {@link MapRunnable} to exert greater
  * control on map processing e.g. multi-threaded <code>Mapper</code>s etc.</p>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/QueueManager.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/QueueManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/QueueManager.java
index 39fae2a..794c55d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/QueueManager.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/QueueManager.java
@@ -46,20 +46,20 @@ import java.net.URL;
 /**
  * Class that exposes information about queues maintained by the Hadoop
  * Map/Reduce framework.
- * <p/>
+ * <p>
  * The Map/Reduce framework can be configured with one or more queues,
  * depending on the scheduler it is configured with. While some
  * schedulers work only with one queue, some schedulers support multiple
  * queues. Some schedulers also support the notion of queues within
  * queues - a feature called hierarchical queues.
- * <p/>
+ * <p>
  * Queue names are unique, and used as a key to lookup queues. Hierarchical
  * queues are named by a 'fully qualified name' such as q1:q2:q3, where
  * q2 is a child queue of q1 and q3 is a child queue of q2.
- * <p/>
+ * <p>
  * Leaf level queues are queues that contain no queues within them. Jobs
  * can be submitted only to leaf level queues.
- * <p/>
+ * <p>
  * Queues can be configured with various properties. Some of these
  * properties are common to all schedulers, and those are handled by this
  * class. Schedulers might also associate several custom properties with
@@ -69,11 +69,11 @@ import java.net.URL;
  * provided by the framework, but define their own mechanisms. In such cases,
  * it is likely that the name of the queue will be used to relate the
  * common properties of a queue with scheduler specific properties.
- * <p/>
+ * <p>
  * Information related to a queue, such as its name, properties, scheduling
  * information and children are exposed by this class via a serializable
  * class called {@link JobQueueInfo}.
- * <p/>
+ * <p>
  * Queues are configured in the configuration file mapred-queues.xml.
  * To support backwards compatibility, queues can also be configured
  * in mapred-site.xml. However, when configured in the latter, there is
@@ -102,7 +102,7 @@ public class QueueManager {
   /**
    * Factory method to create an appropriate instance of a queue
    * configuration parser.
-   * <p/>
+   * <p>
    * Returns a parser that can parse either the deprecated property
    * style queue configuration in mapred-site.xml, or one that can
    * parse hierarchical queues in mapred-queues.xml. First preference
@@ -157,7 +157,7 @@ public class QueueManager {
   /**
    * Construct a new QueueManager using configuration specified in the passed
    * in {@link org.apache.hadoop.conf.Configuration} object.
-   * <p/>
+   * <p>
    * This instance supports queue configuration specified in mapred-site.xml,
    * but without support for hierarchical queues. If no queue configuration
    * is found in mapred-site.xml, it will then look for site configuration
@@ -173,7 +173,7 @@ public class QueueManager {
   /**
    * Create an instance that supports hierarchical queues, defined in
    * the passed in configuration file.
-   * <p/>
+   * <p>
    * This is mainly used for testing purposes and should not called from
    * production code.
    *
@@ -208,7 +208,7 @@ public class QueueManager {
   /**
    * Return the set of leaf level queues configured in the system to
    * which jobs are submitted.
-   * <p/>
+   * <p>
    * The number of queues configured should be dependent on the Scheduler
    * configured. Note that some schedulers work with only one queue, whereas
    * others can support multiple queues.
@@ -222,7 +222,7 @@ public class QueueManager {
   /**
    * Return true if the given user is part of the ACL for the given
    * {@link QueueACL} name for the given queue.
-   * <p/>
+   * <p>
    * An operation is allowed if all users are provided access for this
    * operation, or if either the user or any of the groups specified is
    * provided access.
@@ -283,7 +283,7 @@ public class QueueManager {
   /**
    * Set a generic Object that represents scheduling information relevant
    * to a queue.
-   * <p/>
+   * <p>
    * A string representation of this Object will be used by the framework
    * to display in user facing applications like the JobTracker web UI and
    * the hadoop CLI.
@@ -323,7 +323,7 @@ public class QueueManager {
 
   /**
    * Refresh acls, state and scheduler properties for the configured queues.
-   * <p/>
+   * <p>
    * This method reloads configuration related to queues, but does not
    * support changes to the list of queues or hierarchy. The expected usage
    * is that an administrator can modify the queue configuration file and
@@ -431,7 +431,7 @@ public class QueueManager {
 
   /**
    * JobQueueInfo for all the queues.
-   * <p/>
+   * <p>
    * Contribs can use this data structure to either create a hierarchy or for
    * traversing.
    * They can also use this to refresh properties in case of refreshQueues
@@ -450,7 +450,7 @@ public class QueueManager {
 
   /**
    * Generates the array of QueueAclsInfo object.
-   * <p/>
+   * <p>
    * The array consists of only those queues for which user has acls.
    *
    * @return QueueAclsInfo[]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/RecordReader.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/RecordReader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/RecordReader.java
index 0c95a14..6e2c89f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/RecordReader.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/RecordReader.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.classification.InterfaceStability;
  *   
  * <p><code>RecordReader</code>, typically, converts the byte-oriented view of 
  * the input, provided by the <code>InputSplit</code>, and presents a 
- * record-oriented view for the {@link Mapper} & {@link Reducer} tasks for 
+ * record-oriented view for the {@link Mapper} and {@link Reducer} tasks for
  * processing. It thus assumes the responsibility of processing record 
  * boundaries and presenting the tasks with keys and values.</p>
  * 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Reducer.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Reducer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Reducer.java
index 3fefa4b..962e195 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Reducer.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Reducer.java
@@ -42,7 +42,7 @@ import org.apache.hadoop.io.Closeable;
  * <ol>
  *   <li>
  *   
- *   <h4 id="Shuffle">Shuffle</h4>
+ *   <b id="Shuffle">Shuffle</b>
  *   
  *   <p><code>Reducer</code> is input the grouped output of a {@link Mapper}.
  *   In the phase the framework, for each <code>Reducer</code>, fetches the 
@@ -51,7 +51,7 @@ import org.apache.hadoop.io.Closeable;
  *   </li>
  *   
  *   <li>
- *   <h4 id="Sort">Sort</h4>
+ *   <b id="Sort">Sort</b>
  *   
  *   <p>The framework groups <code>Reducer</code> inputs by <code>key</code>s 
  *   (since different <code>Mapper</code>s may have output the same key) in this
@@ -60,7 +60,7 @@ import org.apache.hadoop.io.Closeable;
  *   <p>The shuffle and sort phases occur simultaneously i.e. while outputs are
  *   being fetched they are merged.</p>
  *      
- *   <h5 id="SecondarySort">SecondarySort</h5>
+ *   <b id="SecondarySort">SecondarySort</b>
  *   
  *   <p>If equivalence rules for keys while grouping the intermediates are 
  *   different from those for grouping keys before reduction, then one may 
@@ -86,11 +86,11 @@ import org.apache.hadoop.io.Closeable;
  *   </li>
  *   
  *   <li>   
- *   <h4 id="Reduce">Reduce</h4>
+ *   <b id="Reduce">Reduce</b>
  *   
  *   <p>In this phase the 
  *   {@link #reduce(Object, Iterator, OutputCollector, Reporter)}
- *   method is called for each <code>&lt;key, (list of values)></code> pair in
+ *   method is called for each <code>&lt;key, (list of values)&gt;</code> pair in
  *   the grouped inputs.</p>
  *   <p>The output of the reduce task is typically written to the 
  *   {@link FileSystem} via 
@@ -156,7 +156,7 @@ import org.apache.hadoop.io.Closeable;
  *         }
  *       }
  *     }
- * </pre></blockquote></p>
+ * </pre></blockquote>
  * 
  * @see Mapper
  * @see Partitioner
@@ -171,7 +171,7 @@ public interface Reducer<K2, V2, K3, V3> extends JobConfigurable, Closeable {
    * <i>Reduces</i> values for a given key.  
    * 
    * <p>The framework calls this method for each 
-   * <code>&lt;key, (list of values)></code> pair in the grouped inputs.
+   * <code>&lt;key, (list of values)&gt;</code> pair in the grouped inputs.
    * Output values must be of the same type as input values.  Input keys must 
    * not be altered. The framework will <b>reuse</b> the key and value objects
    * that are passed into the reduce, therefore the application should clone
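A minimal old-API reducer following this contract could look like the sketch below (class name hypothetical); the value object handed out by the iterator is reused by the framework, so it is consumed immediately rather than stored:

  import java.io.IOException;
  import java.util.Iterator;
  import org.apache.hadoop.io.IntWritable;
  import org.apache.hadoop.io.Text;
  import org.apache.hadoop.mapred.MapReduceBase;
  import org.apache.hadoop.mapred.OutputCollector;
  import org.apache.hadoop.mapred.Reducer;
  import org.apache.hadoop.mapred.Reporter;

  public class SumReducer extends MapReduceBase
      implements Reducer<Text, IntWritable, Text, IntWritable> {
    private final IntWritable result = new IntWritable();

    public void reduce(Text key, Iterator<IntWritable> values,
        OutputCollector<Text, IntWritable> output, Reporter reporter)
        throws IOException {
      int sum = 0;
      while (values.hasNext()) {
        sum += values.next().get();  // read the reused object, do not retain it
      }
      result.set(sum);
      output.collect(key, result);   // one record per grouped key
    }
  }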

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskUmbilicalProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskUmbilicalProtocol.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskUmbilicalProtocol.java
index 5df02c7..c3678d6 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskUmbilicalProtocol.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskUmbilicalProtocol.java
@@ -178,7 +178,6 @@ public interface TaskUmbilicalProtocol extends VersionedProtocol {
    *
    * @param taskID task's id
    * @return the most recent checkpoint (if any) for this task
-   * @throws IOException
    */
   TaskCheckpointID getCheckpointID(TaskID taskID);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/ChainMapper.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/ChainMapper.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/ChainMapper.java
index 14f040a..723a234 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/ChainMapper.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/ChainMapper.java
@@ -29,61 +29,61 @@ import java.io.IOException;
 /**
  * The ChainMapper class allows to use multiple Mapper classes within a single
  * Map task.
- * <p/>
+ * <p>
  * The Mapper classes are invoked in a chained (or piped) fashion, the output of
  * the first becomes the input of the second, and so on until the last Mapper,
  * the output of the last Mapper will be written to the task's output.
- * <p/>
+ * <p>
  * The key functionality of this feature is that the Mappers in the chain do not
  * need to be aware that they are executed in a chain. This enables having
  * reusable specialized Mappers that can be combined to perform composite
  * operations within a single task.
- * <p/>
+ * <p>
  * Special care has to be taken when creating chains that the key/values output
  * by a Mapper are valid for the following Mapper in the chain. It is assumed
 * all Mappers and the Reduce in the chain use matching output and input key and
  * value classes as no conversion is done by the chaining code.
- * <p/>
+ * <p>
 * Using the ChainMapper and the ChainReducer classes it is possible to compose
 * Map/Reduce jobs that look like <code>[MAP+ / REDUCE MAP*]</code>. An
  * immediate benefit of this pattern is a dramatic reduction in disk IO.
- * <p/>
+ * <p>
  * IMPORTANT: There is no need to specify the output key/value classes for the
  * ChainMapper, this is done by the addMapper for the last mapper in the chain.
- * <p/>
+ * <p>
  * ChainMapper usage pattern:
- * <p/>
+ * <p>
  * <pre>
  * ...
  * conf.setJobName("chain");
  * conf.setInputFormat(TextInputFormat.class);
  * conf.setOutputFormat(TextOutputFormat.class);
- * <p/>
+ *
  * JobConf mapAConf = new JobConf(false);
  * ...
  * ChainMapper.addMapper(conf, AMap.class, LongWritable.class, Text.class,
  *   Text.class, Text.class, true, mapAConf);
- * <p/>
+ *
  * JobConf mapBConf = new JobConf(false);
  * ...
  * ChainMapper.addMapper(conf, BMap.class, Text.class, Text.class,
  *   LongWritable.class, Text.class, false, mapBConf);
- * <p/>
+ *
  * JobConf reduceConf = new JobConf(false);
  * ...
  * ChainReducer.setReducer(conf, XReduce.class, LongWritable.class, Text.class,
  *   Text.class, Text.class, true, reduceConf);
- * <p/>
+ *
  * ChainReducer.addMapper(conf, CMap.class, Text.class, Text.class,
  *   LongWritable.class, Text.class, false, null);
- * <p/>
+ *
  * ChainReducer.addMapper(conf, DMap.class, LongWritable.class, Text.class,
  *   LongWritable.class, LongWritable.class, true, null);
- * <p/>
+ *
  * FileInputFormat.setInputPaths(conf, inDir);
  * FileOutputFormat.setOutputPath(conf, outDir);
  * ...
- * <p/>
+ *
  * JobClient jc = new JobClient(conf);
  * RunningJob job = jc.submitJob(conf);
  * ...
@@ -95,21 +95,21 @@ public class ChainMapper implements Mapper {
 
   /**
    * Adds a Mapper class to the chain job's JobConf.
-   * <p/>
+   * <p>
    * It has to be specified how key and values are passed from one element of
    * the chain to the next, by value or by reference. If a Mapper leverages the
    * assumed semantics that the key and values are not modified by the collector
    * 'by value' must be used. If the Mapper does not expect this semantics, as
    * an optimization to avoid serialization and deserialization 'by reference'
    * can be used.
-   * <p/>
+   * <p>
    * For the added Mapper the configuration given for it,
   * <code>mapperConf</code>, has precedence over the job's JobConf. This
    * precedence is in effect when the task is running.
-   * <p/>
+   * <p>
    * IMPORTANT: There is no need to specify the output key/value classes for the
    * ChainMapper, this is done by the addMapper for the last mapper in the chain
-   * <p/>
+   * <p>
    *
    * @param job              job's JobConf to add the Mapper class.
    * @param klass            the Mapper class to add.
@@ -148,7 +148,7 @@ public class ChainMapper implements Mapper {
 
   /**
    * Configures the ChainMapper and all the Mappers in the chain.
-   * <p/>
+   * <p>
    * If this method is overriden <code>super.configure(...)</code> should be
    * invoked at the beginning of the overwriter method.
    */
@@ -171,7 +171,7 @@ public class ChainMapper implements Mapper {
 
   /**
    * Closes  the ChainMapper and all the Mappers in the chain.
-   * <p/>
+   * <p>
    * If this method is overriden <code>super.close()</code> should be
    * invoked at the end of the overwriter method.
    */
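Pulled together from the usage pattern above, a driver for the full [MAP+ / REDUCE MAP*] pipeline might read as follows; AMap, BMap, CMap and XReduce are the placeholder classes from the pattern and are assumed to exist with matching type parameters:

  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.io.LongWritable;
  import org.apache.hadoop.io.Text;
  import org.apache.hadoop.mapred.FileInputFormat;
  import org.apache.hadoop.mapred.FileOutputFormat;
  import org.apache.hadoop.mapred.JobClient;
  import org.apache.hadoop.mapred.JobConf;
  import org.apache.hadoop.mapred.TextInputFormat;
  import org.apache.hadoop.mapred.TextOutputFormat;
  import org.apache.hadoop.mapred.lib.ChainMapper;
  import org.apache.hadoop.mapred.lib.ChainReducer;

  public class ChainJobDriver {
    public static void main(String[] args) throws Exception {
      JobConf conf = new JobConf(ChainJobDriver.class);
      conf.setJobName("chain");
      conf.setInputFormat(TextInputFormat.class);
      conf.setOutputFormat(TextOutputFormat.class);

      // AMap feeds BMap: by value first (the collector may reuse objects),
      // then by reference as an optimization once the semantics allow it.
      ChainMapper.addMapper(conf, AMap.class, LongWritable.class, Text.class,
          Text.class, Text.class, true, new JobConf(false));
      ChainMapper.addMapper(conf, BMap.class, Text.class, Text.class,
          LongWritable.class, Text.class, false, new JobConf(false));

      // Reducer, then a post-reduce mapper; the last element in the chain
      // determines the job's output key/value classes.
      ChainReducer.setReducer(conf, XReduce.class, LongWritable.class, Text.class,
          Text.class, Text.class, true, new JobConf(false));
      ChainReducer.addMapper(conf, CMap.class, Text.class, Text.class,
          LongWritable.class, Text.class, false, null);

      FileInputFormat.setInputPaths(conf, new Path(args[0]));
      FileOutputFormat.setOutputPath(conf, new Path(args[1]));
      JobClient.runJob(conf);
    }
  }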

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/ChainReducer.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/ChainReducer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/ChainReducer.java
index 641d82c..6f5b7cd 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/ChainReducer.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/ChainReducer.java
@@ -27,63 +27,63 @@ import java.util.Iterator;
 /**
  * The ChainReducer class allows to chain multiple Mapper classes after a
  * Reducer within the Reducer task.
- * <p/>
+ * <p>
  * For each record output by the Reducer, the Mapper classes are invoked in a
  * chained (or piped) fashion, the output of the first becomes the input of the
  * second, and so on until the last Mapper, the output of the last Mapper will
  * be written to the task's output.
- * <p/>
+ * <p>
  * The key functionality of this feature is that the Mappers in the chain do not
  * need to be aware that they are executed after the Reducer or in a chain.
  * This enables having reusable specialized Mappers that can be combined to
  * perform composite operations within a single task.
- * <p/>
+ * <p>
  * Special care has to be taken when creating chains that the key/values output
  * by a Mapper are valid for the following Mapper in the chain. It is assumed
 * all Mappers and the Reduce in the chain use matching output and input key and
  * value classes as no conversion is done by the chaining code.
- * <p/>
+ * <p>
 * Using the ChainMapper and the ChainReducer classes it is possible to compose
 * Map/Reduce jobs that look like <code>[MAP+ / REDUCE MAP*]</code>. An
  * immediate benefit of this pattern is a dramatic reduction in disk IO.
- * <p/>
+ * <p>
  * IMPORTANT: There is no need to specify the output key/value classes for the
  * ChainReducer, this is done by the setReducer or the addMapper for the last
  * element in the chain.
- * <p/>
+ * <p>
  * ChainReducer usage pattern:
- * <p/>
+ * <p>
  * <pre>
  * ...
  * conf.setJobName("chain");
  * conf.setInputFormat(TextInputFormat.class);
  * conf.setOutputFormat(TextOutputFormat.class);
- * <p/>
+ *
  * JobConf mapAConf = new JobConf(false);
  * ...
  * ChainMapper.addMapper(conf, AMap.class, LongWritable.class, Text.class,
  *   Text.class, Text.class, true, mapAConf);
- * <p/>
+ *
  * JobConf mapBConf = new JobConf(false);
  * ...
  * ChainMapper.addMapper(conf, BMap.class, Text.class, Text.class,
  *   LongWritable.class, Text.class, false, mapBConf);
- * <p/>
+ *
  * JobConf reduceConf = new JobConf(false);
  * ...
  * ChainReducer.setReducer(conf, XReduce.class, LongWritable.class, Text.class,
  *   Text.class, Text.class, true, reduceConf);
- * <p/>
+ *
  * ChainReducer.addMapper(conf, CMap.class, Text.class, Text.class,
  *   LongWritable.class, Text.class, false, null);
- * <p/>
+ *
  * ChainReducer.addMapper(conf, DMap.class, LongWritable.class, Text.class,
  *   LongWritable.class, LongWritable.class, true, null);
- * <p/>
+ *
  * FileInputFormat.setInputPaths(conf, inDir);
  * FileOutputFormat.setOutputPath(conf, outDir);
  * ...
- * <p/>
+ *
  * JobClient jc = new JobClient(conf);
  * RunningJob job = jc.submitJob(conf);
  * ...
@@ -95,18 +95,18 @@ public class ChainReducer implements Reducer {
 
   /**
    * Sets the Reducer class to the chain job's JobConf.
-   * <p/>
+   * <p>
    * It has to be specified how key and values are passed from one element of
    * the chain to the next, by value or by reference. If a Reducer leverages the
    * assumed semantics that the key and values are not modified by the collector
    * 'by value' must be used. If the Reducer does not expect this semantics, as
    * an optimization to avoid serialization and deserialization 'by reference'
    * can be used.
-   * <p/>
+   * <p>
    * For the added Reducer the configuration given for it,
   * <code>reducerConf</code>, has precedence over the job's JobConf. This
    * precedence is in effect when the task is running.
-   * <p/>
+   * <p>
    * IMPORTANT: There is no need to specify the output key/value classes for the
    * ChainReducer, this is done by the setReducer or the addMapper for the last
    * element in the chain.
@@ -139,18 +139,18 @@ public class ChainReducer implements Reducer {
 
   /**
    * Adds a Mapper class to the chain job's JobConf.
-   * <p/>
+   * <p>
    * It has to be specified how key and values are passed from one element of
    * the chain to the next, by value or by reference. If a Mapper leverages the
    * assumed semantics that the key and values are not modified by the collector
    * 'by value' must be used. If the Mapper does not expect this semantics, as
    * an optimization to avoid serialization and deserialization 'by reference'
    * can be used.
-   * <p/>
+   * <p>
    * For the added Mapper the configuration given for it,
   * <code>mapperConf</code>, has precedence over the job's JobConf. This
    * precedence is in effect when the task is running.
-   * <p/>
+   * <p>
    * IMPORTANT: There is no need to specify the output key/value classes for the
   * ChainMapper, this is done by the addMapper for the last mapper in the chain.
@@ -191,7 +191,7 @@ public class ChainReducer implements Reducer {
 
   /**
    * Configures the ChainReducer, the Reducer and all the Mappers in the chain.
-   * <p/>
+   * <p>
    * If this method is overriden <code>super.configure(...)</code> should be
    * invoked at the beginning of the overwriter method.
    */
@@ -215,7 +215,7 @@ public class ChainReducer implements Reducer {
 
   /**
    * Closes  the ChainReducer, the Reducer and all the Mappers in the chain.
-   * <p/>
+   * <p>
    * If this method is overriden <code>super.close()</code> should be
    * invoked at the end of the overwriter method.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/MultipleOutputs.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/MultipleOutputs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/MultipleOutputs.java
index 39e80f9..f0f3652 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/MultipleOutputs.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/MultipleOutputs.java
@@ -31,29 +31,29 @@ import java.util.*;
  * than the job default output via the <code>OutputCollector</code> passed to
  * the <code>map()</code> and <code>reduce()</code> methods of the
  * <code>Mapper</code> and <code>Reducer</code> implementations.
- * <p/>
+ * <p>
  * Each additional output, or named output, may be configured with its own
  * <code>OutputFormat</code>, with its own key class and with its own value
  * class.
- * <p/>
+ * <p>
 * A named output can be a single file or a multi file. The latter is referred to as
  * a multi named output.
- * <p/>
+ * <p>
  * A multi named output is an unbound set of files all sharing the same
  * <code>OutputFormat</code>, key class and value class configuration.
- * <p/>
+ * <p>
  * When named outputs are used within a <code>Mapper</code> implementation,
  * key/values written to a name output are not part of the reduce phase, only
  * key/values written to the job <code>OutputCollector</code> are part of the
  * reduce phase.
- * <p/>
+ * <p>
 * MultipleOutputs supports counters, by default they are disabled. The counters
  * group is the {@link MultipleOutputs} class name.
  * </p>
  * The names of the counters are the same as the named outputs. For multi
  * named outputs the name of the counter is the concatenation of the named
  * output, and underscore '_' and the multiname.
- * <p/>
+ * <p>
  * Job configuration usage pattern is:
  * <pre>
  *
@@ -82,7 +82,7 @@ import java.util.*;
  *
  * ...
  * </pre>
- * <p/>
+ * <p>
  * Job configuration usage pattern is:
  * <pre>
  *
@@ -271,7 +271,6 @@ public class MultipleOutputs {
 
   /**
    * Adds a named output for the job.
-   * <p/>
    *
    * @param conf              job conf to add the named output
    * @param namedOutput       named output name, it has to be a word, letters
@@ -291,7 +290,6 @@ public class MultipleOutputs {
 
   /**
    * Adds a multi named output for the job.
-   * <p/>
    *
    * @param conf              job conf to add the named output
    * @param namedOutput       named output name, it has to be a word, letters
@@ -311,7 +309,6 @@ public class MultipleOutputs {
 
   /**
    * Adds a named output for the job.
-   * <p/>
    *
    * @param conf              job conf to add the named output
    * @param namedOutput       named output name, it has to be a word, letters
@@ -339,9 +336,9 @@ public class MultipleOutputs {
 
   /**
    * Enables or disables counters for the named outputs.
-   * <p/>
+   * <p>
    * By default these counters are disabled.
-   * <p/>
+   * <p>
   * MultipleOutputs supports counters, by default they are disabled.
    * The counters group is the {@link MultipleOutputs} class name.
    * </p>
@@ -358,9 +355,9 @@ public class MultipleOutputs {
 
   /**
    * Returns if the counters for the named outputs are enabled or not.
-   * <p/>
+   * <p>
    * By default these counters are disabled.
-   * <p/>
+   * <p>
   * MultipleOutputs supports counters, by default they are disabled.
    * The counters group is the {@link MultipleOutputs} class name.
    * </p>
@@ -465,7 +462,6 @@ public class MultipleOutputs {
 
   /**
    * Gets the output collector for a named output.
-   * <p/>
    *
    * @param namedOutput the named output name
    * @param reporter    the reporter
@@ -480,7 +476,6 @@ public class MultipleOutputs {
 
   /**
    * Gets the output collector for a multi named output.
-   * <p/>
    *
    * @param namedOutput the named output name
    * @param multiName   the multi name part
@@ -525,7 +520,7 @@ public class MultipleOutputs {
 
   /**
    * Closes all the opened named outputs.
-   * <p/>
+   * <p>
    * If overriden subclasses must invoke <code>super.close()</code> at the
    * end of their <code>close()</code>
    *
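As a sketch of how these pieces fit together in the old API (the output names "text" and "seq" are arbitrary, and the surrounding driver and reducer classes are assumed):

  // Driver side: declare a named output and a multi named output, with counters.
  MultipleOutputs.addNamedOutput(conf, "text", TextOutputFormat.class,
      Text.class, Text.class);
  MultipleOutputs.addMultiNamedOutput(conf, "seq", SequenceFileOutputFormat.class,
      Text.class, Text.class);
  MultipleOutputs.setCountersEnabled(conf, true);

  // Inside the Reducer implementation:
  private MultipleOutputs mos;

  public void configure(JobConf job) {
    mos = new MultipleOutputs(job);
  }

  public void reduce(Text key, Iterator<Text> values,
      OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
    output.collect(key, values.next());                    // job default output
    mos.getCollector("text", reporter).collect(key, new Text("detail"));
    mos.getCollector("seq", "part1", reporter).collect(key, new Text("more"));
  }

  public void close() throws IOException {
    mos.close();                                           // flush named outputs
  }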

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/TokenCountMapper.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/TokenCountMapper.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/TokenCountMapper.java
index 8e884ce..75179e1 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/TokenCountMapper.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/TokenCountMapper.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.mapred.Reporter;
 
 
 /** 
- * A {@link Mapper} that maps text values into <token,freq> pairs.  Uses
+ * A {@link Mapper} that maps text values into &lt;token,freq&gt; pairs.  Uses
  * {@link StringTokenizer} to break text into tokens. 
  */
 @InterfaceAudience.Public

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorJob.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorJob.java
index 8c20723..6251925 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorJob.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorJob.java
@@ -60,7 +60,7 @@ import org.apache.hadoop.util.GenericOptionsParser;
  * The developer using Aggregate will need only to provide a plugin class
  * conforming to the following interface:
  * 
- * public interface ValueAggregatorDescriptor { public ArrayList<Entry>
+ * public interface ValueAggregatorDescriptor { public ArrayList&lt;Entry&gt;
  * generateKeyValPairs(Object key, Object value); public void
 * configure(JobConf job); }
  * 
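For instance, a word count can be phrased as such a plugin; the descriptor below is a hypothetical sketch that relies on the LONG_VALUE_SUM helper from ValueAggregatorBaseDescriptor:

  import java.util.ArrayList;
  import java.util.Map.Entry;
  import org.apache.hadoop.io.Text;
  import org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorBaseDescriptor;

  public class WordCountDescriptor extends ValueAggregatorBaseDescriptor {
    public ArrayList<Entry<Text, Text>> generateKeyValPairs(Object key, Object val) {
      ArrayList<Entry<Text, Text>> pairs = new ArrayList<Entry<Text, Text>>();
      // One LongValueSum aggregator per token; the framework sums the "1"s.
      for (String word : val.toString().split("\\s+")) {
        pairs.add(generateEntry(LONG_VALUE_SUM, word, new Text("1")));
      }
      return pairs;
    }
  }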

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorReducer.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorReducer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorReducer.java
index a6b3573..2738968 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorReducer.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/aggregate/ValueAggregatorReducer.java
@@ -45,7 +45,8 @@ public class ValueAggregatorReducer<K1 extends WritableComparable,
    *          driven computing is achieved. It is assumed that each aggregator's
    *          getReport method emits appropriate output for the aggregator. This
   *          may be further customized.
-   * @value the values to be aggregated
+   * @param values
+   *          the values to be aggregated
    */
   public void reduce(Text key, Iterator<Text> values,
                      OutputCollector<Text, Text> output, Reporter reporter) throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/db/DBInputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/db/DBInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/db/DBInputFormat.java
index 2715705..159919f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/db/DBInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/db/DBInputFormat.java
@@ -195,8 +195,8 @@ public class DBInputFormat<T  extends DBWritable>
    * @param inputClass the class object implementing DBWritable, which is the 
    * Java object holding tuple fields.
    * @param tableName The table to read data from
-   * @param conditions The condition which to select data with, eg. '(updated >
-   * 20070101 AND length > 0)'
+   * @param conditions The condition which to select data with, eg. '(updated &gt;
+   * 20070101 AND length &gt; 0)'
    * @param orderBy the fieldNames in the orderBy clause.
    * @param fieldNames The field names in the table
    * @see #setInput(JobConf, Class, String, String)
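A typical driver-side call, with a hypothetical "orders" table, an OrderRecord class implementing DBWritable, and placeholder connection details; the condition string is passed straight into the generated WHERE clause:

  JobConf job = new JobConf();
  job.setInputFormat(DBInputFormat.class);
  DBConfiguration.configureDB(job, "com.mysql.jdbc.Driver",
      "jdbc:mysql://dbhost/mydb", "user", "password");
  DBInputFormat.setInput(job, OrderRecord.class, "orders",
      "(updated > 20070101 AND length > 0)",   // conditions
      "updated",                               // orderBy
      "id", "updated", "length");              // fieldNames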

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
index 60ff715..34353ac 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
@@ -134,6 +134,7 @@ public class Cluster {
   
   /**
    * Close the <code>Cluster</code>.
+   * @throws IOException
    */
   public synchronized void close() throws IOException {
     clientProtocolProvider.close(client);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/ClusterMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/ClusterMetrics.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/ClusterMetrics.java
index c4c2778..b5e54b5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/ClusterMetrics.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/ClusterMetrics.java
@@ -40,15 +40,15 @@ import org.apache.hadoop.io.Writable;
  *   Slot capacity of the cluster. 
  *   </li>
  *   <li>
- *   The number of currently occupied/reserved map & reduce slots.
+ *   The number of currently occupied/reserved map and reduce slots.
  *   </li>
  *   <li>
- *   The number of currently running map & reduce tasks.
+ *   The number of currently running map and reduce tasks.
  *   </li>
  *   <li>
  *   The number of job submissions.
  *   </li>
- * </ol></p>
+ * </ol>
  * 
  * <p>Clients can query for the latest <code>ClusterMetrics</code>, via 
  * {@link Cluster#getClusterStatus()}.</p>
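A short sketch of that query path (checked exceptions are left to the enclosing main):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.mapreduce.Cluster;
  import org.apache.hadoop.mapreduce.ClusterMetrics;

  public class MetricsProbe {
    public static void main(String[] args) throws Exception {
      Cluster cluster = new Cluster(new Configuration());
      ClusterMetrics metrics = cluster.getClusterStatus();
      System.out.println("occupied map slots: " + metrics.getOccupiedMapSlots());
      System.out.println("running reduces:    " + metrics.getRunningReduces());
      System.out.println("job submissions:    " + metrics.getTotalJobSubmissions());
      cluster.close();
    }
  }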

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/CryptoUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/CryptoUtils.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/CryptoUtils.java
index 184cdf0..ef06176 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/CryptoUtils.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/CryptoUtils.java
@@ -123,11 +123,11 @@ public class CryptoUtils {
    * "mapreduce.job.encrypted-intermediate-data.buffer.kb" Job configuration
    * variable.
    * 
-   * If the value of 'length' is > -1, The InputStream is additionally wrapped
-   * in a LimitInputStream. CryptoStreams are late buffering in nature. This
-   * means they will always try to read ahead if they can. The LimitInputStream
-   * will ensure that the CryptoStream does not read past the provided length
-   * from the given Input Stream.
+   * If the value of 'length' is &gt; -1, The InputStream is additionally
+   * wrapped in a LimitInputStream. CryptoStreams are late buffering in nature.
+   * This means they will always try to read ahead if they can. The
+   * LimitInputStream will ensure that the CryptoStream does not read past the
+   * provided length from the given Input Stream.
    * 
    * @param conf
    * @param in

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
index 470290c..f404175 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
@@ -69,7 +69,7 @@ import org.apache.hadoop.yarn.api.records.ReservationId;
  *
  *     // Submit the job, then poll for progress until the job is complete
  *     job.waitForCompletion(true);
- * </pre></blockquote></p>
+ * </pre></blockquote>
  * 
  * 
  */
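Spelled out as a runnable driver (MyMapper and MyReducer are placeholders for real implementations):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.io.IntWritable;
  import org.apache.hadoop.io.Text;
  import org.apache.hadoop.mapreduce.Job;
  import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
  import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

  public class MyJobDriver {
    public static void main(String[] args) throws Exception {
      Job job = Job.getInstance(new Configuration(), "my-job");
      job.setJarByClass(MyJobDriver.class);
      job.setMapperClass(MyMapper.class);
      job.setReducerClass(MyReducer.class);
      job.setOutputKeyClass(Text.class);
      job.setOutputValueClass(IntWritable.class);
      FileInputFormat.addInputPath(job, new Path(args[0]));
      FileOutputFormat.setOutputPath(job, new Path(args[1]));
      // Submit, then poll for progress until the job completes.
      System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
  }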

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobContext.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobContext.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobContext.java
index 836f182..6bd2d1f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobContext.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobContext.java
@@ -289,7 +289,6 @@ public interface JobContext extends MRJobConfig {
    * Get the timestamps of the archives.  Used by internal
    * DistributedCache and MapReduce code.
    * @return a string array of timestamps 
-   * @throws IOException
    */
   public String[] getArchiveTimestamps();
 
@@ -297,7 +296,6 @@ public interface JobContext extends MRJobConfig {
    * Get the timestamps of the files.  Used by internal
    * DistributedCache and MapReduce code.
    * @return a string array of timestamps 
-   * @throws IOException
    */
   public String[] getFileTimestamps();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java
index 516e661..7125077 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java
@@ -100,7 +100,7 @@ public class JobSubmissionFiles {
 
   /**
    * Initializes the staging directory and returns the path. It also
-   * keeps track of all necessary ownership & permissions
+   * keeps track of all necessary ownership and permissions
    * @param cluster
    * @param conf
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Mapper.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Mapper.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Mapper.java
index 3a6186b..6b4147b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Mapper.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Mapper.java
@@ -42,9 +42,9 @@ import org.apache.hadoop.mapreduce.task.MapContextImpl;
  * 
  * <p>The framework first calls 
  * {@link #setup(org.apache.hadoop.mapreduce.Mapper.Context)}, followed by
- * {@link #map(Object, Object, Context)} 
+ * {@link #map(Object, Object, org.apache.hadoop.mapreduce.Mapper.Context)}
  * for each key/value pair in the <code>InputSplit</code>. Finally 
- * {@link #cleanup(Context)} is called.</p>
+ * {@link #cleanup(org.apache.hadoop.mapreduce.Mapper.Context)} is called.</p>
  * 
  * <p>All intermediate values associated with a given output key are 
  * subsequently grouped by the framework, and passed to a {@link Reducer} to  
@@ -84,9 +84,10 @@ import org.apache.hadoop.mapreduce.task.MapContextImpl;
  *     }
  *   }
  * }
- * </pre></blockquote></p>
+ * </pre></blockquote>
  *
- * <p>Applications may override the {@link #run(Context)} method to exert 
+ * <p>Applications may override the
+ * {@link #run(org.apache.hadoop.mapreduce.Mapper.Context)} method to exert
  * greater control on map processing e.g. multi-threaded <code>Mapper</code>s 
  * etc.</p>
  * 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Reducer.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Reducer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Reducer.java
index ddf67e1..ab67ab0 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Reducer.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Reducer.java
@@ -39,14 +39,14 @@ import java.util.Iterator;
  * <ol>
  *   <li>
  *   
- *   <h4 id="Shuffle">Shuffle</h4>
+ *   <b id="Shuffle">Shuffle</b>
  *   
  *   <p>The <code>Reducer</code> copies the sorted output from each 
  *   {@link Mapper} using HTTP across the network.</p>
  *   </li>
  *   
  *   <li>
- *   <h4 id="Sort">Sort</h4>
+ *   <b id="Sort">Sort</b>
  *   
  *   <p>The framework merge sorts <code>Reducer</code> inputs by 
  *   <code>key</code>s 
@@ -55,7 +55,7 @@ import java.util.Iterator;
  *   <p>The shuffle and sort phases occur simultaneously i.e. while outputs are
  *   being fetched they are merged.</p>
  *      
- *   <h5 id="SecondarySort">SecondarySort</h5>
+ *   <b id="SecondarySort">SecondarySort</b>
  *   
  *   <p>To achieve a secondary sort on the values returned by the value 
  *   iterator, the application should extend the key with the secondary
@@ -83,10 +83,10 @@ import java.util.Iterator;
  *   </li>
  *   
  *   <li>   
- *   <h4 id="Reduce">Reduce</h4>
+ *   <b id="Reduce">Reduce</b>
  *   
  *   <p>In this phase the 
- *   {@link #reduce(Object, Iterable, Context)}
+ *   {@link #reduce(Object, Iterable, org.apache.hadoop.mapreduce.Reducer.Context)}
  *   method is called for each <code>&lt;key, (collection of values)&gt;</code> in
  *   the sorted inputs.</p>
  *   <p>The output of the reduce task is typically written to a 
@@ -113,7 +113,7 @@ import java.util.Iterator;
  *     context.write(key, result);
  *   }
  * }
- * </pre></blockquote></p>
+ * </pre></blockquote>
  * 
  * @see Mapper
  * @see Partitioner
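The secondary-sort recipe above translates into driver wiring along these lines, where job is an org.apache.hadoop.mapreduce.Job and the four classes are hypothetical implementations of the pattern:

  job.setMapOutputKeyClass(CompositeKey.class);               // natural + secondary key
  job.setPartitionerClass(NaturalKeyPartitioner.class);       // partition on natural key
  job.setSortComparatorClass(CompositeKeyComparator.class);   // sort on the whole key
  job.setGroupingComparatorClass(NaturalKeyGroupComparator.class); // group on natural key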

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/DistributedCache.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/DistributedCache.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/DistributedCache.java
index 06737c9..51fe69a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/DistributedCache.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/DistributedCache.java
@@ -115,7 +115,7 @@ import java.net.URI;
  *       }
  *     }
  *     
- * </pre></blockquote></p>
+ * </pre></blockquote>
  *
  * It is also very common to use the DistributedCache by using
  * {@link org.apache.hadoop.util.GenericOptionsParser}.
@@ -235,7 +235,6 @@ public class DistributedCache {
    * DistributedCache and MapReduce code.
    * @param conf The configuration which stored the timestamps
    * @return a long array of timestamps
-   * @throws IOException
    * @deprecated Use {@link JobContext#getArchiveTimestamps()} instead
    */
   @Deprecated
@@ -250,7 +249,6 @@ public class DistributedCache {
    * DistributedCache and MapReduce code.
    * @param conf The configuration which stored the timestamps
    * @return a long array of timestamps
-   * @throws IOException
    * @deprecated Use {@link JobContext#getFileTimestamps()} instead
    */
   @Deprecated
@@ -434,7 +432,6 @@ public class DistributedCache {
    * internal DistributedCache and MapReduce code.
    * @param conf The configuration which stored the timestamps
    * @return a string array of booleans 
-   * @throws IOException
    */
   public static boolean[] getFileVisibilities(Configuration conf) {
     return parseBooleans(conf.getStrings(MRJobConfig.CACHE_FILE_VISIBILITIES));
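In the modern Job API the same facility is reached without the deprecated statics; the path below is hypothetical, and the '#lookup' fragment names the symlink created in the task's working directory:

  // Driver side (inside a method that may throw URISyntaxException/IOException):
  job.addCacheFile(new java.net.URI("/apps/refdata/lookup.txt#lookup"));

  // Task side, e.g. in Mapper.setup(): read through the symlink.
  BufferedReader in = new BufferedReader(new FileReader("lookup"));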

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorJob.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorJob.java
index d8833da..de25f64 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorJob.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorJob.java
@@ -60,7 +60,7 @@ import org.apache.hadoop.util.GenericOptionsParser;
  * The developer using Aggregate will need only to provide a plugin class
  * conforming to the following interface:
  * 
- * public interface ValueAggregatorDescriptor { public ArrayList<Entry>
+ * public interface ValueAggregatorDescriptor { public ArrayList&lt;Entry&gt;
  * generateKeyValPairs(Object key, Object value); public void
  * configure(Configuration conf); }
  * 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java
index 208616b..1dad13e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java
@@ -600,7 +600,7 @@ public class Chain {
   /**
    * Adds a Mapper class to the chain job.
    * 
-   * <p/>
+   * <p>
    * The configuration properties of the chain job have precedence over the
    * configuration properties of the Mapper.
    * 
@@ -738,7 +738,7 @@ public class Chain {
   /**
    * Sets the Reducer class to the chain job.
    * 
-   * <p/>
+   * <p>
    * The configuration properties of the chain job have precedence over the
    * configuration properties of the Reducer.
    * 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/ChainMapper.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/ChainMapper.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/ChainMapper.java
index c042ff0..c3bf012 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/ChainMapper.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/ChainMapper.java
@@ -57,24 +57,24 @@ import org.apache.hadoop.mapreduce.lib.chain.Chain.ChainBlockingQueue;
  * ChainMapper, this is done by the addMapper for the last mapper in the chain.
  * </p>
  * ChainMapper usage pattern:
- * <p/>
+ * <p>
  * 
  * <pre>
  * ...
 * Job job = new Job(conf);
- * <p/>
+ *
  * Configuration mapAConf = new Configuration(false);
  * ...
  * ChainMapper.addMapper(job, AMap.class, LongWritable.class, Text.class,
  *   Text.class, Text.class, true, mapAConf);
- * <p/>
+ *
  * Configuration mapBConf = new Configuration(false);
  * ...
  * ChainMapper.addMapper(job, BMap.class, Text.class, Text.class,
  *   LongWritable.class, Text.class, false, mapBConf);
- * <p/>
+ *
  * ...
- * <p/>
+ *
 * job.waitForCompletion(true);
  * ...
  * </pre>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/ChainReducer.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/ChainReducer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/ChainReducer.java
index dc03d5d..1c37587 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/ChainReducer.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/ChainReducer.java
@@ -50,7 +50,7 @@ import java.io.IOException;
  * all Mappers and the Reduce in the chain use matching output and input key and
  * value classes as no conversion is done by the chaining code.
  * </p>
- * </p> Using the ChainMapper and the ChainReducer classes is possible to
+ * <p> Using the ChainMapper and the ChainReducer classes it is possible to
 * compose Map/Reduce jobs that look like <code>[MAP+ / REDUCE MAP*]</code>. An
  * immediate benefit of this pattern is a dramatic reduction in disk IO. </p>
  * <p>
@@ -59,26 +59,26 @@ import java.io.IOException;
  * element in the chain.
  * </p>
  * ChainReducer usage pattern:
- * <p/>
+ * <p>
  * 
  * <pre>
  * ...
 * Job job = new Job(conf);
  * ....
- * <p/>
+ *
  * Configuration reduceConf = new Configuration(false);
  * ...
  * ChainReducer.setReducer(job, XReduce.class, LongWritable.class, Text.class,
  *   Text.class, Text.class, true, reduceConf);
- * <p/>
+ *
  * ChainReducer.addMapper(job, CMap.class, Text.class, Text.class,
  *   LongWritable.class, Text.class, false, null);
- * <p/>
+ *
  * ChainReducer.addMapper(job, DMap.class, LongWritable.class, Text.class,
  *   LongWritable.class, LongWritable.class, true, null);
- * <p/>
+ *
  * ...
- * <p/>
+ *
  * job.waitForCompletion(true);
  * ...
  * </pre>
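Condensed into one driver fragment (AMap, XReduce and CMap are assumed to exist with matching type parameters; note the new API drops the old byValue flag):

  Job job = Job.getInstance(conf, "chain");
  ChainMapper.addMapper(job, AMap.class, LongWritable.class, Text.class,
      Text.class, Text.class, new Configuration(false));
  ChainReducer.setReducer(job, XReduce.class, Text.class, Text.class,
      LongWritable.class, Text.class, new Configuration(false));
  ChainReducer.addMapper(job, CMap.class, LongWritable.class, Text.class,
      LongWritable.class, LongWritable.class, new Configuration(false));
  job.waitForCompletion(true);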

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBInputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBInputFormat.java
index a6953b7..78c3a0f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBInputFormat.java
@@ -319,7 +319,7 @@ public class DBInputFormat<T extends DBWritable>
    * Java object holding tuple fields.
    * @param tableName The table to read data from
    * @param conditions The condition which to select data with, 
-   * eg. '(updated > 20070101 AND length > 0)'
+   * eg. '(updated &gt; 20070101 AND length &gt; 0)'
    * @param orderBy the fieldNames in the orderBy clause.
    * @param fieldNames The field names in the table
    * @see #setInput(Job, Class, String, String)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBWritable.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBWritable.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBWritable.java
index cc0d30a..5753a3b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBWritable.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/DBWritable.java
@@ -73,7 +73,7 @@ import org.apache.hadoop.io.Writable;
  *     timestamp = resultSet.getLong(2);
  *   } 
  * }
- * </pre></p>
+ * </pre>
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/TupleWritable.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/TupleWritable.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/TupleWritable.java
index af6b3f0..2990ca9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/TupleWritable.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/TupleWritable.java
@@ -144,7 +144,7 @@ public class TupleWritable implements Writable, Iterable<Writable> {
 
   /**
    * Convert Tuple to String as in the following.
-   * <tt>[<child1>,<child2>,...,<childn>]</tt>
+   * <tt>[&lt;child1&gt;,&lt;child2&gt;,...,&lt;childn&gt;]</tt>
    */
   public String toString() {
     StringBuffer buf = new StringBuffer("[");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java
index 814e494..733b18c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java
@@ -44,15 +44,15 @@ import java.util.List;
 * Multithreaded implementation for {@link org.apache.hadoop.mapreduce.Mapper}.
  * <p>
  * It can be used instead of the default implementation,
- * @link org.apache.hadoop.mapred.MapRunner, when the Map operation is not CPU
+ * {@link org.apache.hadoop.mapred.MapRunner}, when the Map operation is not CPU
  * bound in order to improve throughput.
  * <p>
  * Mapper implementations using this MapRunnable must be thread-safe.
  * <p>
  * The Map-Reduce job has to be configured with the mapper to use via 
- * {@link #setMapperClass(Configuration, Class)} and
+ * {@link #setMapperClass(Job, Class)} and
  * the number of thread the thread-pool can use with the
- * {@link #getNumberOfThreads(Configuration) method. The default
+ * {@link #getNumberOfThreads(JobContext)} method. The default
  * value is 10 threads.
  * <p>
  */
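In practice the wiring looks like the fragment below, where job is the Job being configured and IoBoundMapper is a placeholder for a thread-safe, I/O-bound Mapper implementation:

  job.setMapperClass(MultithreadedMapper.class);
  MultithreadedMapper.setMapperClass(job, IoBoundMapper.class);
  MultithreadedMapper.setNumberOfThreads(job, 16);   // default is 10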


[39/50] [abbrv] hadoop git commit: HADOOP-11653. shellprofiles should require .sh extension (Brahma Reddy Battula via aw)

Posted by ji...@apache.org.
HADOOP-11653. shellprofiles should require .sh extension (Brahma Reddy Battula via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/667c3fce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/667c3fce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/667c3fce

Branch: refs/heads/HDFS-7285
Commit: 667c3fce2d3404c4daf5cdf2a034e7a53d3754c7
Parents: c6199e7
Author: Allen Wittenauer <aw...@apache.org>
Authored: Fri Mar 6 13:54:11 2015 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:26 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../src/main/bin/hadoop-functions.sh            |   4 +-
 .../src/main/conf/shellprofile.d/example        | 106 -------------------
 .../src/main/conf/shellprofile.d/example.sh     | 106 +++++++++++++++++++
 .../hadoop-hdfs/src/main/shellprofile.d/hdfs    |  36 -------
 .../hadoop-hdfs/src/main/shellprofile.d/hdfs.sh |  36 +++++++
 .../shellprofile.d/mapreduce                    |  41 -------
 .../shellprofile.d/mapreduce.sh                 |  41 +++++++
 .../hadoop-yarn/shellprofile.d/yarn             |  62 -----------
 .../hadoop-yarn/shellprofile.d/yarn.sh          |  62 +++++++++++
 10 files changed, 250 insertions(+), 247 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/667c3fce/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 65c6d85..628faa3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -414,6 +414,9 @@ Trunk (Unreleased)
 
     HADOOP-11602. Fix toUpperCase/toLowerCase to use Locale.ENGLISH. (ozawa)
 
+    HADOOP-11653. shellprofiles should require .sh extension
+    (Brahma Reddy Battula via aw)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/667c3fce/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index bccbe25..9488e3c 100644
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -162,13 +162,13 @@ function hadoop_import_shellprofiles
   local files2
 
   if [[ -d "${HADOOP_LIBEXEC_DIR}/shellprofile.d" ]]; then
-    files1=(${HADOOP_LIBEXEC_DIR}/shellprofile.d/*)
+    files1=(${HADOOP_LIBEXEC_DIR}/shellprofile.d/*.sh)
   else
     hadoop_error "WARNING: ${HADOOP_LIBEXEC_DIR}/shellprofile.d doesn't exist. Functionality may not work."
   fi
 
   if [[ -d "${HADOOP_CONF_DIR}/shellprofile.d" ]]; then
-    files2=(${HADOOP_CONF_DIR}/shellprofile.d/*)
+    files2=(${HADOOP_CONF_DIR}/shellprofile.d/*.sh)
   fi
 
   for i in "${files1[@]}" "${files2[@]}"
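With the stricter glob, a profile is only sourced if its file name ends in .sh. A minimal profile, saved for example as ${HADOOP_CONF_DIR}/shellprofile.d/myapp.sh (the profile name and MYAPP_HOME are hypothetical), would follow the structure documented in the example file shown below:

  # myapp.sh -- registers the "myapp" shell profile
  hadoop_add_profile myapp

  function _myapp_hadoop_classpath
  {
    # site-specific home, assumed to be set elsewhere
    hadoop_add_classpath "${MYAPP_HOME}/share/jars/*" after
  }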

http://git-wip-us.apache.org/repos/asf/hadoop/blob/667c3fce/hadoop-common-project/hadoop-common/src/main/conf/shellprofile.d/example
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/shellprofile.d/example b/hadoop-common-project/hadoop-common/src/main/conf/shellprofile.d/example
deleted file mode 100644
index dc50821..0000000
--- a/hadoop-common-project/hadoop-common/src/main/conf/shellprofile.d/example
+++ /dev/null
@@ -1,106 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# This is an example shell profile.  It does not do anything other than
-# show an example of what the general structure and API of the pluggable
-# shell profile code looks like.
-#
-#
-
-#
-#  First, register the profile:
-#
-# hadoop_add_profile example
-#
-#
-# This profile name determines what the name of the functions will
-# be. The general pattern is _(profilename)_hadoop_(capability).  There
-# are currently four capabilities:
-#  * init
-#  * classpath
-#  * nativelib
-#  * finalize
-#
-# None of these functions are required.  Examples of all four follow...
-
-#
-# The _hadoop_init function is called near the very beginning of the
-# execution cycle. System and site-level shell env vars have been set,
-# command line processing finished, etc.  Note that the user's .hadooprc
-# has not yet been processed.  This is to allow them to override anything
-# that may be set here or potentially a dependency!
-#
-# function _example_hadoop_init
-# {
-#   # This example expects a home.  So set a default if not set.
-#   EXAMPLE_HOME="${EXAMPLE_HOME:-/usr/example}"
-# }
-#
-
-#
-# The _hadoop_classpath function is called when the shell code is
-# establishing the classpath.  This function should use the
-# shell hadoop_add_classpath function rather than directly
-# manipulating the CLASSPATH variable.  This ensures that the
-# CLASSPATH does not have duplicates and provides basic
-# sanity checks
-#
-# function _example_hadoop_classpath
-# {
-#   # jars that should be near the front
-#   hadoop_add_classpath "${EXAMPLE_HOME}/share/pre-jars/*" before
-#
-#   # jars that should be near the back
-#   hadoop_add_classpath "${EXAMPLE_HOME}/share/post-jars/*" after
-# }
-
-#
-# The _hadoop_nativelib function is called when the shell code is
-# building the locations for linkable shared libraries. Depending
-# upon needs, there are shell function calls that are useful
-# to use here:
-#
-# hadoop_add_javalibpath will push the path onto the command line
-# and into the java.library.path system property.  In the majority
-# of cases, this should be sufficient, especially if the shared
-# library has been linked correctly with $ORIGIN.
-#
-# hadoop_add_ldlibpath will push the path into the LD_LIBRARY_PATH
-# env var.  This should be unnecessary for most code.
-#
-# function _example_hadoop_nativelib
-# {
-#   # our library is standalone, so just need the basic path
-#   # added. Using after so we are later in the link list
-#   hadoop_add_javalibpath "${EXAMPLE_HOME}/lib" after
-# }
-
-#
-# The _hadoop_finalize function is called to finish up whatever
-# extra work needs to be done prior to exec'ing java or some other
-# binary. This is where command line properties should get added
-# and any last minute work.  This is called prior to Hadoop common
-# which means that one can override any parameters that Hadoop
-# would normally put here... so be careful!
-#
-# Useful functions here include hadoop_add_param and for
-# Windows compatibility, hadoop_translate_cygwin_path.
-#
-# function _example_hadoop_finalize
-# {
-#   # we need a property for our feature
-#   hadoop_add_param HADOOP_OPTS Dexample.feature "-Dexample.feature=awesome"
-# }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/667c3fce/hadoop-common-project/hadoop-common/src/main/conf/shellprofile.d/example.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/shellprofile.d/example.sh b/hadoop-common-project/hadoop-common/src/main/conf/shellprofile.d/example.sh
new file mode 100644
index 0000000..dc50821
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/conf/shellprofile.d/example.sh
@@ -0,0 +1,106 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# This is an example shell profile.  It does not do anything other than
+# show an example of what the general structure and API of the pluggable
+# shell profile code looks like.
+#
+#
+
+#
+#  First, register the profile:
+#
+# hadoop_add_profile example
+#
+#
+# This profile name determines what the name of the functions will
+# be. The general pattern is _(profilename)_hadoop_(capability).  There
+# are currently four capabilities:
+#  * init
+#  * classpath
+#  * nativelib
+#  * finalize
+#
+# None of these functions are required.  Examples of all four follow...
+
+#
+# The _hadoop_init function is called near the very beginning of the
+# execution cycle. System and site-level shell env vars have been set,
+# command line processing finished, etc.  Note that the user's .hadooprc
+# has not yet been processed.  This is to allow them to override anything
+# that may be set here or potentially a dependency!
+#
+# function _example_hadoop_init
+# {
+#   # This example expects a home.  So set a default if not set.
+#   EXAMPLE_HOME="${EXAMPLE_HOME:-/usr/example}"
+# }
+#
+
+#
+# The _hadoop_classpath function is called when the shell code is
+# establishing the classpath.  This function should use the
+# shell hadoop_add_classpath function rather than directly
+# manipulating the CLASSPATH variable.  This ensures that the
+# CLASSPATH does not have duplicates and provides basic
+# sanity checks
+#
+# function _example_hadoop_classpath
+# {
+#   # jars that should be near the front
+#   hadoop_add_classpath "${EXAMPLE_HOME}/share/pre-jars/*" before
+#
+#   # jars that should be near the back
+#   hadoop_add_classpath "${EXAMPLE_HOME}/share/post-jars/*" after
+# }
+
+#
+# The _hadoop_nativelib function is called when the shell code is
+# building the locations for linkable shared libraries. Depending
+# upon needs, there are shell function calls that are useful
+# to use here:
+#
+# hadoop_add_javalibpath will push the path onto the command line
+# and into the java.library.path system property.  In the majority
+# of cases, this should be sufficient, especially if the shared
+# library has been linked correctly with $ORIGIN.
+#
+# hadoop_add_ldlibpath will push the path into the LD_LIBRARY_PATH
+# env var.  This should be unnecessary for most code.
+#
+# function _example_hadoop_nativelib
+# {
+#   # our library is standalone, so just need the basic path
+#   # added. Using after so we are later in the link list
+#   hadoop_add_javalibpath "${EXAMPLE_HOME}/lib" after
+# }
+
+#
+# The _hadoop_finalize function is called to finish up whatever
+# extra work needs to be done prior to exec'ing java or some other
+# binary. This is where command line properties should get added
+# and any last minute work.  This is called prior to Hadoop common
+# which means that one can override any parameters that Hadoop
+# would normally put here... so be careful!
+#
+# Useful functions here include hadoop_add_param and for
+# Windows compatibility, hadoop_translate_cygwin_path.
+#
+# function _example_hadoop_finalize
+# {
+#   # we need a property for our feature
+#   hadoop_add_param HADOOP_OPTS Dexample.feature "-Dexample.feature=awesome"
+# }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/667c3fce/hadoop-hdfs-project/hadoop-hdfs/src/main/shellprofile.d/hdfs
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/shellprofile.d/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/shellprofile.d/hdfs
deleted file mode 100644
index 5eb9e48..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/shellprofile.d/hdfs
+++ /dev/null
@@ -1,36 +0,0 @@
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-hadoop_add_profile hdfs
-
-function _hdfs_hadoop_classpath
-{
-  #
-  # get all of the hdfs jars+config in the path
-  #
-  # developers
-  if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
-    hadoop_add_classpath "${HADOOP_HDFS_HOME}/hadoop-hdfs/target/classes"
-  fi
-
-  # put hdfs in classpath if present
-  if [[ -d "${HADOOP_HDFS_HOME}/${HDFS_DIR}/webapps" ]]; then
-    hadoop_add_classpath "${HADOOP_HDFS_HOME}/${HDFS_DIR}"
-  fi
-
-  hadoop_add_classpath "${HADOOP_HDFS_HOME}/${HDFS_LIB_JARS_DIR}"'/*'
-  hadoop_add_classpath "${HADOOP_HDFS_HOME}/${HDFS_DIR}"'/*'
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/667c3fce/hadoop-hdfs-project/hadoop-hdfs/src/main/shellprofile.d/hdfs.sh
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/shellprofile.d/hdfs.sh b/hadoop-hdfs-project/hadoop-hdfs/src/main/shellprofile.d/hdfs.sh
new file mode 100644
index 0000000..5eb9e48
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/shellprofile.d/hdfs.sh
@@ -0,0 +1,36 @@
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+hadoop_add_profile hdfs
+
+function _hdfs_hadoop_classpath
+{
+  #
+  # get all of the hdfs jars+config in the path
+  #
+  # developers
+  if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
+    hadoop_add_classpath "${HADOOP_HDFS_HOME}/hadoop-hdfs/target/classes"
+  fi
+
+  # put hdfs in classpath if present
+  if [[ -d "${HADOOP_HDFS_HOME}/${HDFS_DIR}/webapps" ]]; then
+    hadoop_add_classpath "${HADOOP_HDFS_HOME}/${HDFS_DIR}"
+  fi
+
+  hadoop_add_classpath "${HADOOP_HDFS_HOME}/${HDFS_LIB_JARS_DIR}"'/*'
+  hadoop_add_classpath "${HADOOP_HDFS_HOME}/${HDFS_DIR}"'/*'
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/667c3fce/hadoop-mapreduce-project/shellprofile.d/mapreduce
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/shellprofile.d/mapreduce b/hadoop-mapreduce-project/shellprofile.d/mapreduce
deleted file mode 100644
index 0b3dab1..0000000
--- a/hadoop-mapreduce-project/shellprofile.d/mapreduce
+++ /dev/null
@@ -1,41 +0,0 @@
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-hadoop_add_profile mapred
-
-function _mapred_hadoop_classpath
-{
-  #
-  # get all of the mapreduce jars+config in the path
-  #
-  # developers
-  if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
-    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-shuffle/target/classes"
-    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-common/target/classes"
-    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-hs/target/classes"
-    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-hs-plugins/target/classes"
-    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-app/target/classes"
-    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-jobclient/target/classes"
-    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-core/target/classes"
-  fi
-
-  if [[ -d "${HADOOP_MAPRED_HOME}/${MAPRED_DIR}/webapps" ]]; then
-    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/${MAPRED_DIR}"
-  fi
-
-  hadoop_add_classpath "${HADOOP_MAPRED_HOME}/${MAPRED_LIB_JARS_DIR}"'/*'
-  hadoop_add_classpath "${HADOOP_MAPRED_HOME}/${MAPRED_DIR}"'/*'
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/667c3fce/hadoop-mapreduce-project/shellprofile.d/mapreduce.sh
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/shellprofile.d/mapreduce.sh b/hadoop-mapreduce-project/shellprofile.d/mapreduce.sh
new file mode 100644
index 0000000..0b3dab1
--- /dev/null
+++ b/hadoop-mapreduce-project/shellprofile.d/mapreduce.sh
@@ -0,0 +1,41 @@
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+hadoop_add_profile mapred
+
+function _mapred_hadoop_classpath
+{
+  #
+  # get all of the mapreduce jars+config in the path
+  #
+  # developers
+  if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
+    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-shuffle/target/classes"
+    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-common/target/classes"
+    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-hs/target/classes"
+    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-hs-plugins/target/classes"
+    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-app/target/classes"
+    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-jobclient/target/classes"
+    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-core/target/classes"
+  fi
+
+  if [[ -d "${HADOOP_MAPRED_HOME}/${MAPRED_DIR}/webapps" ]]; then
+    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/${MAPRED_DIR}"
+  fi
+
+  hadoop_add_classpath "${HADOOP_MAPRED_HOME}/${MAPRED_LIB_JARS_DIR}"'/*'
+  hadoop_add_classpath "${HADOOP_MAPRED_HOME}/${MAPRED_DIR}"'/*'
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/667c3fce/hadoop-yarn-project/hadoop-yarn/shellprofile.d/yarn
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/shellprofile.d/yarn b/hadoop-yarn-project/hadoop-yarn/shellprofile.d/yarn
deleted file mode 100644
index 4aa20b1..0000000
--- a/hadoop-yarn-project/hadoop-yarn/shellprofile.d/yarn
+++ /dev/null
@@ -1,62 +0,0 @@
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-hadoop_add_profile yarn
-
-function _yarn_hadoop_classpath
-{
-  local i
-  #
-  # get all of the yarn jars+config in the path
-  #
-  # developers
-  if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
-    for i in yarn-api yarn-common yarn-mapreduce yarn-master-worker \
-             yarn-server/yarn-server-nodemanager \
-             yarn-server/yarn-server-common \
-             yarn-server/yarn-server-resourcemanager; do
-      hadoop_add_classpath "${HADOOP_YARN_HOME}/$i/target/classes"
-    done
-
-    hadoop_add_classpath "${HADOOP_YARN_HOME}/build/test/classes"
-    hadoop_add_classpath "${HADOOP_YARN_HOME}/build/tools"
-  fi
-
-  if [[ -d "${HADOOP_YARN_HOME}/${YARN_DIR}/webapps" ]]; then
-    hadoop_add_classpath "${HADOOP_YARN_HOME}/${YARN_DIR}"
-  fi
-
-  hadoop_add_classpath "${HADOOP_YARN_HOME}/${YARN_LIB_JARS_DIR}"'/*'
-  hadoop_add_classpath  "${HADOOP_YARN_HOME}/${YARN_DIR}"'/*'
-}
-
-function _yarn_hadoop_finalize
-{
-  # Add YARN custom options to the command line in case someone actually
-  # used these.
-  #
-  # Note that we are replacing ' ' with '\ ' so that when we exec
-  # stuff it works
-  #
-  local yld=$HADOOP_LOG_DIR
-  hadoop_translate_cygwin_path yld
-  hadoop_add_param HADOOP_OPTS yarn.log.dir "-Dyarn.log.dir=${yld}"
-  hadoop_add_param HADOOP_OPTS yarn.log.file "-Dyarn.log.file=${HADOOP_LOGFILE}"
-  local yhd=$HADOOP_YARN_HOME
-  hadoop_translate_cygwin_path yhd
-  hadoop_add_param HADOOP_OPTS yarn.home.dir "-Dyarn.home.dir=${yhd}"
-  hadoop_add_param HADOOP_OPTS yarn.root.logger "-Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/667c3fce/hadoop-yarn-project/hadoop-yarn/shellprofile.d/yarn.sh
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/shellprofile.d/yarn.sh b/hadoop-yarn-project/hadoop-yarn/shellprofile.d/yarn.sh
new file mode 100644
index 0000000..4aa20b1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/shellprofile.d/yarn.sh
@@ -0,0 +1,62 @@
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+hadoop_add_profile yarn
+
+function _yarn_hadoop_classpath
+{
+  local i
+  #
+  # get all of the yarn jars+config in the path
+  #
+  # developers
+  if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
+    for i in yarn-api yarn-common yarn-mapreduce yarn-master-worker \
+             yarn-server/yarn-server-nodemanager \
+             yarn-server/yarn-server-common \
+             yarn-server/yarn-server-resourcemanager; do
+      hadoop_add_classpath "${HADOOP_YARN_HOME}/$i/target/classes"
+    done
+
+    hadoop_add_classpath "${HADOOP_YARN_HOME}/build/test/classes"
+    hadoop_add_classpath "${HADOOP_YARN_HOME}/build/tools"
+  fi
+
+  if [[ -d "${HADOOP_YARN_HOME}/${YARN_DIR}/webapps" ]]; then
+    hadoop_add_classpath "${HADOOP_YARN_HOME}/${YARN_DIR}"
+  fi
+
+  hadoop_add_classpath "${HADOOP_YARN_HOME}/${YARN_LIB_JARS_DIR}"'/*'
+  hadoop_add_classpath  "${HADOOP_YARN_HOME}/${YARN_DIR}"'/*'
+}
+
+function _yarn_hadoop_finalize
+{
+  # Add YARN custom options to the command line in case someone actually
+  # used these.
+  #
+  # Note that we are replacing ' ' with '\ ' so that when we exec
+  # stuff it works
+  #
+  local yld=$HADOOP_LOG_DIR
+  hadoop_translate_cygwin_path yld
+  hadoop_add_param HADOOP_OPTS yarn.log.dir "-Dyarn.log.dir=${yld}"
+  hadoop_add_param HADOOP_OPTS yarn.log.file "-Dyarn.log.file=${HADOOP_LOGFILE}"
+  local yhd=$HADOOP_YARN_HOME
+  hadoop_translate_cygwin_path yhd
+  hadoop_add_param HADOOP_OPTS yarn.home.dir "-Dyarn.home.dir=${yhd}"
+  hadoop_add_param HADOOP_OPTS yarn.root.logger "-Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+}


[48/50] [abbrv] hadoop git commit: HDFS-7411. Change decommission logic to throttle by blocks rather than nodes in each interval. Contributed by Andrew Wang

Posted by ji...@apache.org.
HDFS-7411. Change decommission logic to throttle by blocks rather
than nodes in each interval. Contributed by Andrew Wang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a1e4dfe2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a1e4dfe2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a1e4dfe2

Branch: refs/heads/HDFS-7285
Commit: a1e4dfe211b5153697d2375680314c2295fc9e05
Parents: 7b91223
Author: Chris Douglas <cd...@apache.org>
Authored: Sun Mar 8 18:31:04 2015 -0700
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:17:55 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   6 +-
 .../apache/hadoop/hdfs/HdfsConfiguration.java   |   2 +-
 .../server/blockmanagement/BlockManager.java    | 123 +---
 .../server/blockmanagement/DatanodeManager.java | 109 +---
 .../blockmanagement/DecommissionManager.java    | 619 +++++++++++++++++--
 .../src/main/resources/hdfs-default.xml         |  23 +-
 .../apache/hadoop/hdfs/TestDecommission.java    | 412 ++++++++----
 .../blockmanagement/BlockManagerTestUtil.java   |   8 +-
 .../TestReplicationPolicyConsiderLoad.java      |   2 +-
 .../namenode/TestDecommissioningStatus.java     |  59 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   |   2 +-
 .../namenode/TestNamenodeCapacityReport.java    |   4 +-
 13 files changed, 996 insertions(+), 376 deletions(-)
----------------------------------------------------------------------
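
The core idea of this change is to budget the monitor's work per tick in blocks rather than nodes, since individual nodes can hold wildly different block counts. A small self-contained sketch of such a budgeted scan, with hypothetical names (the real implementation appears in DecommissionManager.java below):

    public class BlockBudgetSketch {
      // Stop scanning once the per-tick block budget is spent, instead of
      // stopping after a fixed number of nodes.
      static int scanWithBudget(int[] blocksPerNode, int blockBudget) {
        int blocksChecked = 0;
        int nodesProcessed = 0;
        for (int blocks : blocksPerNode) {
          if (blocksChecked >= blockBudget) {
            break; // budget exhausted for this tick
          }
          blocksChecked += blocks;
          nodesProcessed++;
        }
        return nodesProcessed;
      }

      public static void main(String[] args) {
        int[] nodes = { 200000, 400000, 50000 };
        // With the default 500000-block budget, the third node waits for
        // the next tick.
        System.out.println(scanWithBudget(nodes, 500000)); // prints 2
      }
    }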


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1e4dfe2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 29717e1..3cd6372 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -719,6 +719,9 @@ Release 2.7.0 - UNRELEASED
 
     HDFS-7855. Separate class Packet from DFSOutputStream. (Li Bo via jing9)
 
+    HDFS-7411. Change decommission logic to throttle by blocks rather than
+    nodes in each interval. (Andrew Wang via cdouglas)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1e4dfe2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 9e9cd40..2dded68 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -455,8 +455,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final long    DFS_NAMENODE_PATH_BASED_CACHE_RETRY_INTERVAL_MS_DEFAULT = 30000L;
   public static final String  DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY = "dfs.namenode.decommission.interval";
   public static final int     DFS_NAMENODE_DECOMMISSION_INTERVAL_DEFAULT = 30;
-  public static final String  DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_KEY = "dfs.namenode.decommission.nodes.per.interval";
-  public static final int     DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_DEFAULT = 5;
+  public static final String  DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY = "dfs.namenode.decommission.blocks.per.interval";
+  public static final int     DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_DEFAULT = 500000;
+  public static final String  DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES = "dfs.namenode.decommission.max.concurrent.tracked.nodes";
+  public static final int     DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES_DEFAULT = 100;
   public static final String  DFS_NAMENODE_HANDLER_COUNT_KEY = "dfs.namenode.handler.count";
   public static final int     DFS_NAMENODE_HANDLER_COUNT_DEFAULT = 10;
   public static final String  DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY = "dfs.namenode.service.handler.count";

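The two new keys replace the old per-node throttle with a per-tick block budget plus a cap on concurrently tracked nodes. A minimal sketch of reading them, assuming a standard HdfsConfiguration on the classpath (the class name and output format here are illustrative only; the constants are the ones added above):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class ShowDecomThrottle {
      public static void main(String[] args) {
        // Picks up hdfs-site.xml from the classpath, like other HDFS tools.
        Configuration conf = new HdfsConfiguration();
        int blocksPerInterval = conf.getInt(
            DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY,
            DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_DEFAULT);
        int maxTracked = conf.getInt(
            DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES,
            DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES_DEFAULT);
        System.out.println("blocks per interval = " + blocksPerInterval);
        System.out.println("max tracked nodes   = " + maxTracked);
      }
    }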
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1e4dfe2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
index 8f2966a..29a2667 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
@@ -139,7 +139,7 @@ public class HdfsConfiguration extends Configuration {
       new DeprecationDelta("dfs.federation.nameservice.id",
         DFSConfigKeys.DFS_NAMESERVICE_ID),
       new DeprecationDelta("dfs.client.file-block-storage-locations.timeout",
-        DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS)
+        DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS),
     });
   }
 

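The only change here is a trailing comma, so further DeprecationDelta entries can be appended cleanly. Notably, the deprecated dfs.namenode.decommission.nodes.per.interval key is not mapped through this table, because a node count cannot be mechanically rewritten as a block count; DecommissionManager handles it by hand instead (see below). For reference, a hedged sketch of how the deprecation table is extended, using invented key names:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.conf.Configuration.DeprecationDelta;

    public class DeprecationSketch {
      static {
        // Reads of "example.old.key" transparently resolve to
        // "example.new.key" once this delta is registered.
        Configuration.addDeprecations(new DeprecationDelta[] {
            new DeprecationDelta("example.old.key", "example.new.key"),
        });
      }
    }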
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1e4dfe2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 71ab1d6..1e3d95d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3226,28 +3226,6 @@ public class BlockManager {
     }
     return live;
   }
-
-  private void logBlockReplicationInfo(Block block, DatanodeDescriptor srcNode,
-      NumberReplicas num) {
-    int curReplicas = num.liveReplicas();
-    int curExpectedReplicas = getReplication(block);
-    BlockCollection bc = blocksMap.getBlockCollection(block);
-    StringBuilder nodeList = new StringBuilder();
-    for(DatanodeStorageInfo storage : blocksMap.getStorages(block)) {
-      final DatanodeDescriptor node = storage.getDatanodeDescriptor();
-      nodeList.append(node);
-      nodeList.append(" ");
-    }
-    LOG.info("Block: " + block + ", Expected Replicas: "
-        + curExpectedReplicas + ", live replicas: " + curReplicas
-        + ", corrupt replicas: " + num.corruptReplicas()
-        + ", decommissioned replicas: " + num.decommissionedReplicas()
-        + ", excess replicas: " + num.excessReplicas()
-        + ", Is Open File: " + bc.isUnderConstruction()
-        + ", Datanodes having this block: " + nodeList + ", Current Datanode: "
-        + srcNode + ", Is current datanode decommissioning: "
-        + srcNode.isDecommissionInProgress());
-  }
   
   /**
    * On stopping decommission, check if the node has excess replicas.
@@ -3278,91 +3256,30 @@ public class BlockManager {
   }
 
   /**
-   * Return true if there are any blocks on this node that have not
-   * yet reached their replication factor. Otherwise returns false.
+   * Returns whether a node can be safely decommissioned based on its 
+   * liveness. Dead nodes cannot always be safely decommissioned.
    */
-  // TODO check the DN decommission logic for EC blocks
-  boolean isReplicationInProgress(DatanodeDescriptor srcNode) {
-    boolean status = false;
-    boolean firstReplicationLog = true;
-    int underReplicatedBlocks = 0;
-    int decommissionOnlyReplicas = 0;
-    int underReplicatedInOpenFiles = 0;
-    final Iterator<BlockInfo> it = srcNode.getBlockIterator();
-    while(it.hasNext()) {
-      final BlockInfo block = it.next();
-      final short minStorage = getMinStorageNum(block);
-      BlockCollection bc = blocksMap.getBlockCollection(block);
-
-      if (bc != null) {
-        NumberReplicas num = countNodes(block);
-        int curReplicas = num.liveReplicas();
-        int curExpectedReplicas = getReplication(block);
-                
-        if (isNeededReplication(block, curExpectedReplicas, curReplicas)) {
-          if (curExpectedReplicas > curReplicas) {
-            if (bc.isUnderConstruction()) {
-              if (block.equals(bc.getLastBlock()) && curReplicas > minStorage) {
-                continue;
-              }
-              underReplicatedInOpenFiles++;
-            }
-            
-            // Log info about one block for this node which needs replication
-            if (!status) {
-              status = true;
-              if (firstReplicationLog) {
-                logBlockReplicationInfo(block, srcNode, num);
-              }
-              // Allowing decommission as long as default replication is met
-              if (curReplicas >= getDefaultStorageNum(block)) {
-                status = false;
-                firstReplicationLog = false;
-              }
-            }
-            underReplicatedBlocks++;
-            if ((curReplicas == 0) && (num.decommissionedReplicas() > 0)) {
-              decommissionOnlyReplicas++;
-            }
-          }
-          if (!neededReplications.contains(block) &&
-            pendingReplications.getNumReplicas(block) == 0 &&
-            namesystem.isPopulatingReplQueues()) {
-            //
-            // These blocks have been reported from the datanode
-            // after the startDecommission method has been executed. These
-            // blocks were in flight when the decommissioning was started.
-            // Process these blocks only when active NN is out of safe mode.
-            //
-            neededReplications.add(block,
-                                   curReplicas,
-                                   num.decommissionedReplicas(),
-                                   curExpectedReplicas);
-          }
-        }
-      }
+  boolean isNodeHealthyForDecommission(DatanodeDescriptor node) {
+    if (node.isAlive) {
+      return true;
     }
 
-    if (!status && !srcNode.isAlive) {
-      updateState();
-      if (pendingReplicationBlocksCount == 0 &&
-          underReplicatedBlocksCount == 0) {
-        LOG.info("srcNode {} is dead and there are no under-replicated" +
-            " blocks or blocks pending replication. Marking as " +
-            "decommissioned.");
-      } else {
-        LOG.warn("srcNode " + srcNode + " is dead " +
-            "while decommission is in progress. Continuing to mark " +
-            "it as decommission in progress so when it rejoins the " +
-            "cluster it can continue the decommission process.");
-        status = true;
-      }
+    updateState();
+    if (pendingReplicationBlocksCount == 0 &&
+        underReplicatedBlocksCount == 0) {
+      LOG.info("Node {} is dead and there are no under-replicated" +
+          " blocks or blocks pending replication. Safe to decommission.", 
+          node);
+      return true;
     }
 
-    srcNode.decommissioningStatus.set(underReplicatedBlocks,
-        decommissionOnlyReplicas, 
-        underReplicatedInOpenFiles);
-    return status;
+    LOG.warn("Node {} is dead " +
+        "while decommission is in progress. Cannot be safely " +
+        "decommissioned since there is risk of reduced " +
+        "data durability or data loss. Either restart the failed node or" +
+        " force decommissioning by removing, calling refreshNodes, " +
+        "then re-adding to the excludes files.", node);
+    return false;
   }
 
   public int getActiveBlockCount() {
@@ -3541,7 +3458,7 @@ public class BlockManager {
    * A block needs replication if the number of replicas is less than expected
    * or if it does not have enough racks.
    */
-  private boolean isNeededReplication(Block b, int expected, int current) {
+  boolean isNeededReplication(Block b, int expected, int current) {
     return current < expected || !blockHasEnoughRacks(b);
   }
   

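The per-node block scanning formerly done by isReplicationInProgress moves into DecommissionManager; what remains here is a pure liveness rule: a dead node may only complete decommissioning once no replication work is outstanding. A standalone sketch of that rule, with hypothetical names (the real method reads the BlockManager's pending and under-replicated counters under the namesystem lock):

    public class DecomLivenessRule {
      // Returns whether a node is healthy enough for its decommission to
      // complete, following the rule in isNodeHealthyForDecommission above.
      static boolean healthyForDecommission(boolean nodeAlive,
          long pendingReplicationBlocks, long underReplicatedBlocks) {
        if (nodeAlive) {
          return true; // live nodes are judged by their block replication
        }
        // Dead node: safe only when no replication work remains that could
        // still depend on this node's replicas.
        return pendingReplicationBlocks == 0 && underReplicatedBlocks == 0;
      }

      public static void main(String[] args) {
        System.out.println(healthyForDecommission(false, 0, 0));  // true
        System.out.println(healthyForDecommission(false, 3, 10)); // false
      }
    }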
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1e4dfe2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 45c56a8..9179ff0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -42,7 +42,6 @@ import org.apache.hadoop.hdfs.util.CyclicIteration;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.*;
 import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;
-import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Time;
 
@@ -53,8 +52,6 @@ import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
 import java.util.*;
 
-import static org.apache.hadoop.util.Time.now;
-
 /**
  * Manage datanodes, include decommission and other activities.
  */
@@ -65,9 +62,9 @@ public class DatanodeManager {
 
   private final Namesystem namesystem;
   private final BlockManager blockManager;
+  private final DecommissionManager decomManager;
   private final HeartbeatManager heartbeatManager;
   private final FSClusterStats fsClusterStats;
-  private Daemon decommissionthread = null;
 
   /**
    * Stores the datanode -> block map.  
@@ -110,7 +107,7 @@ public class DatanodeManager {
   private final HostFileManager hostFileManager = new HostFileManager();
 
   /** The period to wait for datanode heartbeat.*/
-  private final long heartbeatExpireInterval;
+  private long heartbeatExpireInterval;
   /** Ask Datanode only up to this many blocks to delete. */
   final int blockInvalidateLimit;
 
@@ -184,6 +181,8 @@ public class DatanodeManager {
     networktopology = NetworkTopology.getInstance(conf);
 
     this.heartbeatManager = new HeartbeatManager(namesystem, blockManager, conf);
+    this.decomManager = new DecommissionManager(namesystem, blockManager,
+        heartbeatManager);
     this.fsClusterStats = newFSClusterStats();
 
     this.defaultXferPort = NetUtils.createSocketAddr(
@@ -307,25 +306,12 @@ public class DatanodeManager {
   }
   
   void activate(final Configuration conf) {
-    final DecommissionManager dm = new DecommissionManager(namesystem, blockManager);
-    this.decommissionthread = new Daemon(dm.new Monitor(
-        conf.getInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 
-                    DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_DEFAULT),
-        conf.getInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_KEY, 
-                    DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_DEFAULT)));
-    decommissionthread.start();
-
+    decomManager.activate(conf);
     heartbeatManager.activate(conf);
   }
 
   void close() {
-    if (decommissionthread != null) {
-      decommissionthread.interrupt();
-      try {
-        decommissionthread.join(3000);
-      } catch (InterruptedException e) {
-      }
-    }
+    decomManager.close();
     heartbeatManager.close();
   }
 
@@ -340,6 +326,20 @@ public class DatanodeManager {
   }
 
   @VisibleForTesting
+  public DecommissionManager getDecomManager() {
+    return decomManager;
+  }
+
+  HostFileManager getHostFileManager() {
+    return hostFileManager;
+  }
+
+  @VisibleForTesting
+  public void setHeartbeatExpireInterval(long expiryMs) {
+    this.heartbeatExpireInterval = expiryMs;
+  }
+
+  @VisibleForTesting
   public FSClusterStats getFSClusterStats() {
     return fsClusterStats;
   }
@@ -826,63 +826,14 @@ public class DatanodeManager {
   }
 
   /**
-   * Decommission the node if it is in exclude list.
+   * Decommission the node if it is in the host exclude list.
+   *
+   * @param nodeReg datanode
    */
-  private void checkDecommissioning(DatanodeDescriptor nodeReg) { 
+  void startDecommissioningIfExcluded(DatanodeDescriptor nodeReg) {
     // If the registered node is in exclude list, then decommission it
-    if (hostFileManager.isExcluded(nodeReg)) {
-      startDecommission(nodeReg);
-    }
-  }
-
-  /**
-   * Change, if appropriate, the admin state of a datanode to 
-   * decommission completed. Return true if decommission is complete.
-   */
-  boolean checkDecommissionState(DatanodeDescriptor node) {
-    // Check to see if all blocks in this decommissioned
-    // node has reached their target replication factor.
-    if (node.isDecommissionInProgress() && node.checkBlockReportReceived()) {
-      if (!blockManager.isReplicationInProgress(node)) {
-        node.setDecommissioned();
-        LOG.info("Decommission complete for " + node);
-      }
-    }
-    return node.isDecommissioned();
-  }
-
-  /** Start decommissioning the specified datanode. */
-  @InterfaceAudience.Private
-  @VisibleForTesting
-  public void startDecommission(DatanodeDescriptor node) {
-    if (!node.isDecommissionInProgress()) {
-      if (!node.isAlive) {
-        LOG.info("Dead node " + node + " is decommissioned immediately.");
-        node.setDecommissioned();
-      } else if (!node.isDecommissioned()) {
-        for (DatanodeStorageInfo storage : node.getStorageInfos()) {
-          LOG.info("Start Decommissioning " + node + " " + storage
-              + " with " + storage.numBlocks() + " blocks");
-        }
-        heartbeatManager.startDecommission(node);
-        node.decommissioningStatus.setStartTime(now());
-
-        // all the blocks that reside on this node have to be replicated.
-        checkDecommissionState(node);
-      }
-    }
-  }
-
-  /** Stop decommissioning the specified datanodes. */
-  void stopDecommission(DatanodeDescriptor node) {
-    if (node.isDecommissionInProgress() || node.isDecommissioned()) {
-      LOG.info("Stop Decommissioning " + node);
-      heartbeatManager.stopDecommission(node);
-      // Over-replicated blocks will be detected and processed when 
-      // the dead node comes back and sends in its full block report.
-      if (node.isAlive) {
-        blockManager.processOverReplicatedBlocksOnReCommission(node);
-      }
+    if (getHostFileManager().isExcluded(nodeReg)) {
+      decomManager.startDecommission(nodeReg);
     }
   }
 
@@ -993,7 +944,7 @@ public class DatanodeManager {
           // also treat the registration message as a heartbeat
           heartbeatManager.register(nodeS);
           incrementVersionCount(nodeS.getSoftwareVersion());
-          checkDecommissioning(nodeS);
+          startDecommissioningIfExcluded(nodeS);
           success = true;
         } finally {
           if (!success) {
@@ -1029,7 +980,7 @@ public class DatanodeManager {
         // because its is done when the descriptor is created
         heartbeatManager.addDatanode(nodeDescr);
         incrementVersionCount(nodeReg.getSoftwareVersion());
-        checkDecommissioning(nodeDescr);
+        startDecommissioningIfExcluded(nodeDescr);
         success = true;
       } finally {
         if (!success) {
@@ -1092,9 +1043,9 @@ public class DatanodeManager {
         node.setDisallowed(true); // case 2.
       } else {
         if (hostFileManager.isExcluded(node)) {
-          startDecommission(node); // case 3.
+          decomManager.startDecommission(node); // case 3.
         } else {
-          stopDecommission(node); // case 4.
+          decomManager.stopDecommission(node); // case 4.
         }
       }
     }

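With the monitor thread removed from DatanodeManager, the refreshNodes path reduces to a single rule keyed off the exclude file: excluded nodes start decommissioning, and nodes dropped from the excludes stop. A sketch of that decision, using hypothetical stand-in interfaces for HostFileManager and DecommissionManager:

    public class ExcludeListRule {
      interface Excludes { boolean isExcluded(String node); }
      interface Decom {
        void startDecommission(String node);
        void stopDecommission(String node);
      }

      // Cases 3 and 4 from the hunk above: the exclude file is the sole
      // driver of decommission state for an allowed, registered node.
      static void refresh(String node, Excludes excludes, Decom decom) {
        if (excludes.isExcluded(node)) {
          decom.startDecommission(node);
        } else {
          decom.stopDecommission(node);
        }
      }
    }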
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1e4dfe2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index a234cf5..dc17abe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -17,88 +17,605 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import java.util.AbstractList;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
 import java.util.Map;
+import java.util.Queue;
+import java.util.TreeMap;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
+import org.apache.hadoop.hdfs.util.CyclicIteration;
+import org.apache.hadoop.util.ChunkedArrayList;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static org.apache.hadoop.util.Time.now;
 
 /**
- * Manage node decommissioning.
+ * Manages datanode decommissioning. A background monitor thread 
+ * periodically checks the status of datanodes that are in the process of
+ * decommissioning.
+ * <p/>
+ * A datanode can be decommissioned in a few situations:
+ * <ul>
+ * <li>If a DN is dead, it is decommissioned immediately.</li>
+ * <li>If a DN is alive, it is decommissioned after all of its blocks 
+ * are sufficiently replicated. Merely under-replicated blocks do not 
+ * block decommissioning as long as they are above a replication 
+ * threshold.</li>
+ * </ul>
+ * In the second case, the datanode transitions to a 
+ * decommission-in-progress state and is tracked by the monitor thread. The 
+ * monitor periodically scans through the list of insufficiently replicated
+ * blocks on these datanodes to 
+ * determine if they can be decommissioned. The monitor also prunes this list 
+ * as blocks become replicated, so monitor scans will become more efficient 
+ * over time.
+ * <p/>
+ * Decommission-in-progress nodes that become dead do not progress to 
+ * decommissioned until they become live again. This prevents potential 
+ * durability loss for singly-replicated blocks (see HDFS-6791).
+ * <p/>
+ * This class depends on the FSNamesystem lock for synchronization.
  */
 @InterfaceAudience.Private
-@InterfaceStability.Evolving
-class DecommissionManager {
-  static final Log LOG = LogFactory.getLog(DecommissionManager.class);
+public class DecommissionManager {
+  private static final Logger LOG = LoggerFactory.getLogger(DecommissionManager
+      .class);
 
   private final Namesystem namesystem;
-  private final BlockManager blockmanager;
+  private final BlockManager blockManager;
+  private final HeartbeatManager hbManager;
+  private final ScheduledExecutorService executor;
+
+  /**
+   * Map containing the decommission-in-progress datanodes that are being
+   * tracked so they can be marked as decommissioned.
+   * <p/>
+   * This holds a set of references to the under-replicated blocks on the DN at
+   * the time the DN is added to the map, i.e. the blocks that are preventing
+   * the node from being marked as decommissioned. During a monitor tick, this
+   * list is pruned as blocks become replicated.
+   * <p/>
+   * Note also that the reference to the list of under-replicated blocks 
+   * will be null on initial add
+   * <p/>
+   * However, this map can become out-of-date since it is not updated by block
+   * reports or other events. Before a node is finally marked as decommissioned,
+   * another check is done with the actual block map.
+   */
+  private final TreeMap<DatanodeDescriptor, AbstractList<BlockInfoContiguous>>
+      decomNodeBlocks;
+
+  /**
+   * Tracking a node in decomNodeBlocks consumes additional memory. To limit
+   * the impact on NN memory consumption, we limit the number of nodes in 
+   * decomNodeBlocks. Additional nodes wait in pendingNodes.
+   */
+  private final Queue<DatanodeDescriptor> pendingNodes;
+
+  private Monitor monitor = null;
 
   DecommissionManager(final Namesystem namesystem,
-      final BlockManager blockmanager) {
+      final BlockManager blockManager, final HeartbeatManager hbManager) {
     this.namesystem = namesystem;
-    this.blockmanager = blockmanager;
+    this.blockManager = blockManager;
+    this.hbManager = hbManager;
+
+    executor = Executors.newScheduledThreadPool(1,
+        new ThreadFactoryBuilder().setNameFormat("DecommissionMonitor-%d")
+            .setDaemon(true).build());
+    decomNodeBlocks = new TreeMap<>();
+    pendingNodes = new LinkedList<>();
+  }
+
+  /**
+   * Start the decommission monitor thread.
+   * @param conf
+   */
+  void activate(Configuration conf) {
+    final int intervalSecs =
+        conf.getInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY,
+            DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_DEFAULT);
+    checkArgument(intervalSecs >= 0, "Cannot set a negative " +
+        "value for " + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY);
+
+    // By default, the new configuration key overrides the deprecated one,
+    // and no limit is placed on the number of nodes per interval.
+    int blocksPerInterval = conf.getInt(
+        DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY,
+        DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_DEFAULT);
+    int nodesPerInterval = Integer.MAX_VALUE;
+
+    // If the expected key isn't present and the deprecated one is, 
+    // fall back to the deprecated per-node value. This overrides the 
+    // default.
+    //
+    // Also print a deprecation warning.
+    final String deprecatedKey =
+        "dfs.namenode.decommission.nodes.per.interval";
+    final String strNodes = conf.get(deprecatedKey);
+    if (strNodes != null) {
+      nodesPerInterval = Integer.parseInt(strNodes);
+      blocksPerInterval = Integer.MAX_VALUE;
+      LOG.warn("Using deprecated configuration key {} value of {}.",
+          deprecatedKey, nodesPerInterval); 
+      LOG.warn("Please update your configuration to use {} instead.", 
+          DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY);
+    }
+    checkArgument(blocksPerInterval > 0,
+        "Must set a positive value for "
+        + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY);
+
+    final int maxConcurrentTrackedNodes = conf.getInt(
+        DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES,
+        DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES_DEFAULT);
+    checkArgument(maxConcurrentTrackedNodes >= 0, "Cannot set a negative " +
+        "value for "
+        + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES);
+
+    monitor = new Monitor(blocksPerInterval, 
+        nodesPerInterval, maxConcurrentTrackedNodes);
+    executor.scheduleAtFixedRate(monitor, intervalSecs, intervalSecs,
+        TimeUnit.SECONDS);
+
+    LOG.debug("Activating DecommissionManager with interval {} seconds, " +
+            "{} max blocks per interval, {} max nodes per interval, " +
+            "{} max concurrently tracked nodes.", intervalSecs,
+        blocksPerInterval, nodesPerInterval, maxConcurrentTrackedNodes);
+  }
+
+  /**
+   * Stop the decommission monitor thread, waiting briefly for it to terminate.
+   */
+  void close() {
+    executor.shutdownNow();
+    try {
+      executor.awaitTermination(3000, TimeUnit.MILLISECONDS);
+    } catch (InterruptedException e) {}
+  }
+
+  /**
+   * Start decommissioning the specified datanode. 
+   * @param node
+   */
+  @VisibleForTesting
+  public void startDecommission(DatanodeDescriptor node) {
+    if (!node.isDecommissionInProgress()) {
+      if (!node.isAlive) {
+        LOG.info("Dead node {} is decommissioned immediately.", node);
+        node.setDecommissioned();
+      } else if (!node.isDecommissioned()) {
+        for (DatanodeStorageInfo storage : node.getStorageInfos()) {
+          LOG.info("Starting decommission of {} {} with {} blocks", 
+              node, storage, storage.numBlocks());
+        }
+        // Update DN stats maintained by HeartbeatManager
+        hbManager.startDecommission(node);
+        node.decommissioningStatus.setStartTime(now());
+        pendingNodes.add(node);
+      }
+    } else {
+      LOG.trace("startDecommission: Node {} is already decommission in "
+              + "progress, nothing to do.", node);
+    }
+  }
+
+  /**
+   * Stop decommissioning the specified datanode. 
+   * @param node
+   */
+  void stopDecommission(DatanodeDescriptor node) {
+    if (node.isDecommissionInProgress() || node.isDecommissioned()) {
+      LOG.info("Stopping decommissioning of node {}", node);
+      // Update DN stats maintained by HeartbeatManager
+      hbManager.stopDecommission(node);
+      // Over-replicated blocks will be detected and processed when 
+      // the dead node comes back and sends in its full block report.
+      if (node.isAlive) {
+        blockManager.processOverReplicatedBlocksOnReCommission(node);
+      }
+      // Remove from tracking in DecommissionManager
+      pendingNodes.remove(node);
+      decomNodeBlocks.remove(node);
+    } else {
+      LOG.trace("stopDecommission: Node {} is not decommission in progress " +
+          "or decommissioned, nothing to do.", node);
+    }
+  }
+
+  private void setDecommissioned(DatanodeDescriptor dn) {
+    dn.setDecommissioned();
+    LOG.info("Decommissioning complete for node {}", dn);
   }
 
-  /** Periodically check decommission status. */
-  class Monitor implements Runnable {
-    /** recheckInterval is how often namenode checks
-     *  if a node has finished decommission
+  /**
+   * Checks whether a block is sufficiently replicated for decommissioning.
+   * Full-strength replication is not always necessary, hence "sufficient".
+   * @return true if sufficient, else false.
+   */
+  private boolean isSufficientlyReplicated(BlockInfoContiguous block, 
+      BlockCollection bc,
+      NumberReplicas numberReplicas) {
+    final int numExpected = bc.getBlockReplication();
+    final int numLive = numberReplicas.liveReplicas();
+    if (!blockManager.isNeededReplication(block, numExpected, numLive)) {
+      // Block doesn't need replication. Skip.
+      LOG.trace("Block {} does not need replication.", block);
+      return true;
+    }
+
+    // Block is under-replicated
+    LOG.trace("Block {} numExpected={}, numLive={}", block, numExpected, 
+        numLive);
+    if (numExpected > numLive) {
+      if (bc.isUnderConstruction() && block.equals(bc.getLastBlock())) {
+        // Can decom a UC block as long as there will still be minReplicas
+        if (numLive >= blockManager.minReplication) {
+          LOG.trace("UC block {} sufficiently-replicated since numLive ({}) "
+              + ">= minR ({})", block, numLive, blockManager.minReplication);
+          return true;
+        } else {
+          LOG.trace("UC block {} insufficiently-replicated since numLive "
+              + "({}) < minR ({})", block, numLive,
+              blockManager.minReplication);
+        }
+      } else {
+        // Can decom a non-UC as long as the default replication is met
+        if (numLive >= blockManager.defaultReplication) {
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+
+  private static void logBlockReplicationInfo(Block block, BlockCollection bc,
+      DatanodeDescriptor srcNode, NumberReplicas num,
+      Iterable<DatanodeStorageInfo> storages) {
+    int curReplicas = num.liveReplicas();
+    int curExpectedReplicas = bc.getBlockReplication();
+    StringBuilder nodeList = new StringBuilder();
+    for (DatanodeStorageInfo storage : storages) {
+      final DatanodeDescriptor node = storage.getDatanodeDescriptor();
+      nodeList.append(node);
+      nodeList.append(" ");
+    }
+    LOG.info("Block: " + block + ", Expected Replicas: "
+        + curExpectedReplicas + ", live replicas: " + curReplicas
+        + ", corrupt replicas: " + num.corruptReplicas()
+        + ", decommissioned replicas: " + num.decommissionedReplicas()
+        + ", excess replicas: " + num.excessReplicas()
+        + ", Is Open File: " + bc.isUnderConstruction()
+        + ", Datanodes having this block: " + nodeList + ", Current Datanode: "
+        + srcNode + ", Is current datanode decommissioning: "
+        + srcNode.isDecommissionInProgress());
+  }
+
+  @VisibleForTesting
+  public int getNumPendingNodes() {
+    return pendingNodes.size();
+  }
+
+  @VisibleForTesting
+  public int getNumTrackedNodes() {
+    return decomNodeBlocks.size();
+  }
+
+  @VisibleForTesting
+  public int getNumNodesChecked() {
+    return monitor.numNodesChecked;
+  }
+
+  /**
+   * Checks to see if DNs have finished decommissioning.
+   * <p/>
+   * Since this is done while holding the namesystem lock, 
+   * the amount of work per monitor tick is limited.
+   */
+  private class Monitor implements Runnable {
+    /**
+     * The maximum number of blocks to check per tick.
+     */
+    private final int numBlocksPerCheck;
+    /**
+     * The maximum number of nodes to check per tick.
      */
-    private final long recheckInterval;
-    /** The number of decommission nodes to check for each interval */
     private final int numNodesPerCheck;
-    /** firstkey can be initialized to anything. */
-    private String firstkey = "";
+    /**
+     * The maximum number of nodes to track in decomNodeBlocks. A value of 0
+     * means no limit.
+     */
+    private final int maxConcurrentTrackedNodes;
+    /**
+     * The number of blocks that have been checked on this tick.
+     */
+    private int numBlocksChecked = 0;
+    /**
+     * The number of nodes that have been checked on this tick. Used for 
+     * testing.
+     */
+    private int numNodesChecked = 0;
+    /**
+     * The last datanode in decomNodeBlocks that we've processed.
+     */
+    private DatanodeDescriptor iterkey = new DatanodeDescriptor(new 
+        DatanodeID("", "", "", 0, 0, 0, 0));
 
-    Monitor(int recheckIntervalInSecond, int numNodesPerCheck) {
-      this.recheckInterval = recheckIntervalInSecond * 1000L;
+    Monitor(int numBlocksPerCheck, int numNodesPerCheck, int 
+        maxConcurrentTrackedNodes) {
+      this.numBlocksPerCheck = numBlocksPerCheck;
       this.numNodesPerCheck = numNodesPerCheck;
+      this.maxConcurrentTrackedNodes = maxConcurrentTrackedNodes;
+    }
+
+    private boolean exceededNumBlocksPerCheck() {
+      LOG.trace("Processed {} blocks so far this tick", numBlocksChecked);
+      return numBlocksChecked >= numBlocksPerCheck;
+    }
+
+    @Deprecated
+    private boolean exceededNumNodesPerCheck() {
+      LOG.trace("Processed {} nodes so far this tick", numNodesChecked);
+      return numNodesChecked >= numNodesPerCheck;
     }
 
-    /**
-     * Check decommission status of numNodesPerCheck nodes
-     * for every recheckInterval milliseconds.
-     */
     @Override
     public void run() {
-      for(; namesystem.isRunning(); ) {
-        namesystem.writeLock();
-        try {
-          check();
-        } finally {
-          namesystem.writeUnlock();
+      if (!namesystem.isRunning()) {
+        LOG.info("Namesystem is not running, skipping decommissioning checks"
+            + ".");
+        return;
+      }
+      // Reset the checked count at beginning of each iteration
+      numBlocksChecked = 0;
+      numNodesChecked = 0;
+      // Check decom progress
+      namesystem.writeLock();
+      try {
+        processPendingNodes();
+        check();
+      } finally {
+        namesystem.writeUnlock();
+      }
+      if (numBlocksChecked + numNodesChecked > 0) {
+        LOG.info("Checked {} blocks and {} nodes this tick", numBlocksChecked,
+            numNodesChecked);
+      }
+    }
+
+    /**
+     * Pop datanodes off the pending queue and add them to decomNodeBlocks,
+     * subject to the maxConcurrentTrackedNodes limit.
+     */
+    private void processPendingNodes() {
+      while (!pendingNodes.isEmpty() &&
+          (maxConcurrentTrackedNodes == 0 ||
+           decomNodeBlocks.size() < maxConcurrentTrackedNodes)) {
+        decomNodeBlocks.put(pendingNodes.poll(), null);
+      }
+    }
+
+    private void check() {
+      final Iterator<Map.Entry<DatanodeDescriptor, AbstractList<BlockInfoContiguous>>>
+          it = new CyclicIteration<>(decomNodeBlocks, iterkey).iterator();
+      final LinkedList<DatanodeDescriptor> toRemove = new LinkedList<>();
+
+      while (it.hasNext()
+          && !exceededNumBlocksPerCheck()
+          && !exceededNumNodesPerCheck()) {
+        numNodesChecked++;
+        final Map.Entry<DatanodeDescriptor, AbstractList<BlockInfoContiguous>>
+            entry = it.next();
+        final DatanodeDescriptor dn = entry.getKey();
+        AbstractList<BlockInfoContiguous> blocks = entry.getValue();
+        boolean fullScan = false;
+        if (blocks == null) {
+          // This is a newly added datanode, run through its list to schedule 
+          // under-replicated blocks for replication and collect the blocks 
+          // that are insufficiently replicated for further tracking
+          LOG.debug("Newly-added node {}, doing full scan to find " +
+              "insufficiently-replicated blocks.", dn);
+          blocks = handleInsufficientlyReplicated(dn);
+          decomNodeBlocks.put(dn, blocks);
+          fullScan = true;
+        } else {
+          // This is a known datanode, check if its # of insufficiently 
+          // replicated blocks has dropped to zero and if it can be decommed
+          LOG.debug("Processing decommission-in-progress node {}", dn);
+          pruneSufficientlyReplicated(dn, blocks);
         }
-  
-        try {
-          Thread.sleep(recheckInterval);
-        } catch (InterruptedException ie) {
-          LOG.warn(this.getClass().getSimpleName() + " interrupted: " + ie);
+        if (blocks.size() == 0) {
+          if (!fullScan) {
+            // If we didn't just do a full scan, we need to re-check with the
+            // full block map.
+            //
+            // We've replicated all the known insufficiently replicated 
+            // blocks. Re-check with the full block map before finally 
+            // marking the datanode as decommissioned 
+            LOG.debug("Node {} has finished replicating current set of "
+                + "blocks, checking with the full block map.", dn);
+            blocks = handleInsufficientlyReplicated(dn);
+            decomNodeBlocks.put(dn, blocks);
+          }
+          // If the full scan is clean AND the node liveness is okay, 
+          // we can finally mark as decommissioned.
+          final boolean isHealthy =
+              blockManager.isNodeHealthyForDecommission(dn);
+          if (blocks.size() == 0 && isHealthy) {
+            setDecommissioned(dn);
+            toRemove.add(dn);
+            LOG.debug("Node {} is sufficiently replicated and healthy, "
+                + "marked as decommissioned.", dn);
+          } else {
+            if (LOG.isDebugEnabled()) {
+              StringBuilder b = new StringBuilder("Node {} ");
+              if (isHealthy) {
+                b.append("is ");
+              } else {
+                b.append("isn't ");
+              }
+              b.append("healthy and still needs to replicate {} more blocks," +
+                  " decommissioning is still in progress.");
+              LOG.debug(b.toString(), dn, blocks.size());
+            }
+          }
+        } else {
+          LOG.debug("Node {} still has {} blocks to replicate "
+                  + "before it is a candidate to finish decommissioning.",
+              dn, blocks.size());
         }
+        iterkey = dn;
+      }
+      // Remove the datanodes that are decommissioned
+      for (DatanodeDescriptor dn : toRemove) {
+        Preconditions.checkState(dn.isDecommissioned(),
+            "Removing a node that is not yet decommissioned!");
+        decomNodeBlocks.remove(dn);
       }
     }
-    
-    private void check() {
-      final DatanodeManager dm = blockmanager.getDatanodeManager();
-      int count = 0;
-      for(Map.Entry<String, DatanodeDescriptor> entry
-          : dm.getDatanodeCyclicIteration(firstkey)) {
-        final DatanodeDescriptor d = entry.getValue();
-        firstkey = entry.getKey();
-
-        if (d.isDecommissionInProgress()) {
-          try {
-            dm.checkDecommissionState(d);
-          } catch(Exception e) {
-            LOG.warn("entry=" + entry, e);
+
+    /**
+     * Removes sufficiently replicated blocks from the block list of a 
+     * datanode.
+     */
+    private void pruneSufficientlyReplicated(final DatanodeDescriptor datanode,
+        AbstractList<BlockInfoContiguous> blocks) {
+      processBlocksForDecomInternal(datanode, blocks.iterator(), null, true);
+    }
+
+    /**
+     * Returns a list of blocks on a datanode that are insufficiently 
+     * replicated, i.e. are under-replicated enough to prevent decommission.
+     * <p/>
+     * As part of this, it also schedules replication work for 
+     * any under-replicated blocks.
+     *
+     * @param datanode the datanode whose blocks should be scanned
+     * @return List of insufficiently replicated blocks
+     */
+    private AbstractList<BlockInfoContiguous> handleInsufficientlyReplicated(
+        final DatanodeDescriptor datanode) {
+      AbstractList<BlockInfoContiguous> insufficient = new ChunkedArrayList<>();
+      processBlocksForDecomInternal(datanode, datanode.getBlockIterator(),
+          insufficient, false);
+      return insufficient;
+    }
+
+    /**
+     * Used while checking if decommission-in-progress datanodes can be marked
+     * as decommissioned. Combines shared logic of 
+     * pruneSufficientlyReplicated and handleInsufficientlyReplicated.
+     *
+     * @param datanode                    Datanode
+     * @param it                          Iterator over the blocks on the
+     *                                    datanode
+     * @param insufficientlyReplicated    Return parameter. If it's not null,
+     *                                    it will collect the insufficiently
+     *                                    replicated blocks from the list.
+     * @param pruneSufficientlyReplicated whether to remove sufficiently
+     *                                    replicated blocks from the iterator
+     */
+    private void processBlocksForDecomInternal(
+        final DatanodeDescriptor datanode,
+        final Iterator<? extends BlockInfo> it,
+        final List<BlockInfoContiguous> insufficientlyReplicated,
+        boolean pruneSufficientlyReplicated) {
+      boolean firstReplicationLog = true;
+      int underReplicatedBlocks = 0;
+      int decommissionOnlyReplicas = 0;
+      int underReplicatedInOpenFiles = 0;
+      while (it.hasNext()) {
+        numBlocksChecked++;
+        final BlockInfoContiguous block = (BlockInfoContiguous) it.next();
+        // Remove the block from the list if it's no longer in the block map,
+        // e.g. the containing file has been deleted
+        if (blockManager.blocksMap.getStoredBlock(block) == null) {
+          LOG.trace("Removing unknown block {}", block);
+          it.remove();
+          continue;
+        }
+        BlockCollection bc = blockManager.blocksMap.getBlockCollection(block);
+        if (bc == null) {
+          // Orphan block, will be invalidated eventually. Skip.
+          continue;
+        }
+
+        final NumberReplicas num = blockManager.countNodes(block);
+        final int liveReplicas = num.liveReplicas();
+        final int curReplicas = liveReplicas;
+
+        // Schedule under-replicated blocks for replication if not already
+        // pending
+        if (blockManager.isNeededReplication(block, bc.getBlockReplication(),
+            liveReplicas)) {
+          if (!blockManager.neededReplications.contains(block) &&
+              blockManager.pendingReplications.getNumReplicas(block) == 0 &&
+              namesystem.isPopulatingReplQueues()) {
+            // Process these blocks only when active NN is out of safe mode.
+            blockManager.neededReplications.add(block,
+                curReplicas,
+                num.decommissionedReplicas(),
+                bc.getBlockReplication());
           }
-          if (++count == numNodesPerCheck) {
-            return;
+        }
+
+        // Even if the block is under-replicated relative to its replication
+        // factor, it doesn't block decommission as long as it is
+        // sufficiently replicated for decommissioning purposes
+        if (isSufficientlyReplicated(block, bc, num)) {
+          if (pruneSufficientlyReplicated) {
+            it.remove();
           }
+          continue;
+        }
+
+        // We've found an insufficiently replicated block.
+        if (insufficientlyReplicated != null) {
+          insufficientlyReplicated.add(block);
+        }
+        // Log if this is our first time through
+        if (firstReplicationLog) {
+          logBlockReplicationInfo(block, bc, datanode, num,
+              blockManager.blocksMap.getStorages(block));
+          firstReplicationLog = false;
+        }
+        // Update various counts
+        underReplicatedBlocks++;
+        if (bc.isUnderConstruction()) {
+          underReplicatedInOpenFiles++;
+        }
+        if ((curReplicas == 0) && (num.decommissionedReplicas() > 0)) {
+          decommissionOnlyReplicas++;
         }
       }
+
+      datanode.decommissioningStatus.set(underReplicatedBlocks,
+          decommissionOnlyReplicas,
+          underReplicatedInOpenFiles);
     }
   }
+
+  @VisibleForTesting
+  void runMonitor() throws ExecutionException, InterruptedException {
+    Future f = executor.submit(monitor);
+    f.get();
+  }
 }

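The new Monitor replaces the old fixed nodes-per-interval scan with a per-tick
budget: at most numBlocksPerCheck blocks are examined across at most
maxConcurrentTrackedNodes datanodes, and a node is marked DECOMMISSIONED only
once its tracked block list is empty and a final full scan against the block
map is clean. A minimal standalone sketch of that budgeting idea follows;
the names and stand-in types are simplified for illustration, this is not
the actual class:

    import java.util.ArrayDeque;
    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.Iterator;
    import java.util.List;
    import java.util.Map;
    import java.util.Queue;

    /** Simplified sketch of the per-tick scan budget in the new Monitor. */
    public class DecomMonitorSketch {
      private final int numBlocksPerCheck;          // e.g. 500000 by default
      private final int maxConcurrentTrackedNodes;  // 0 means no limit
      private final Queue<String> pendingNodes = new ArrayDeque<>();
      // node -> blocks still insufficiently replicated (null = not scanned yet)
      private final Map<String, List<Long>> trackedNodes = new HashMap<>();
      private int blocksCheckedThisTick;

      DecomMonitorSketch(int numBlocksPerCheck, int maxConcurrentTrackedNodes) {
        this.numBlocksPerCheck = numBlocksPerCheck;
        this.maxConcurrentTrackedNodes = maxConcurrentTrackedNodes;
      }

      // Decision rule mirrored from isSufficientlyReplicated: an open file's
      // last block only needs minReplication live replicas, any other block
      // needs the default replication.
      static boolean sufficient(boolean ucLastBlock, int numLive,
          int minReplication, int defaultReplication) {
        return numLive >= (ucLastBlock ? minReplication : defaultReplication);
      }

      void tick() {
        blocksCheckedThisTick = 0;
        // Admit pending nodes up to the tracking limit
        while (!pendingNodes.isEmpty()
            && (maxConcurrentTrackedNodes == 0
                || trackedNodes.size() < maxConcurrentTrackedNodes)) {
          trackedNodes.put(pendingNodes.poll(), null);
        }
        Iterator<Map.Entry<String, List<Long>>> it =
            trackedNodes.entrySet().iterator();
        while (it.hasNext() && blocksCheckedThisTick < numBlocksPerCheck) {
          Map.Entry<String, List<Long>> e = it.next();
          // Re-scan the node's blocks, charging each against the tick budget
          e.setValue(scanInsufficientlyReplicated(e.getKey()));
          if (e.getValue().isEmpty()) {
            it.remove(); // nothing left to replicate: node is decommissioned
          }
        }
      }

      // Stand-in for processBlocksForDecomInternal; the real scan iterates
      // the node's blocks and keeps only the insufficiently replicated ones.
      private List<Long> scanInsufficientlyReplicated(String node) {
        blocksCheckedThisTick++;
        return new ArrayList<>();
      }
    }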
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1e4dfe2/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 7eacfc5..736c96a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -736,10 +736,25 @@
 </property>
 
 <property>
-  <name>dfs.namenode.decommission.nodes.per.interval</name>
-  <value>5</value>
-  <description>The number of nodes namenode checks if decommission is complete
-  in each dfs.namenode.decommission.interval.</description>
+  <name>dfs.namenode.decommission.blocks.per.interval</name>
+  <value>500000</value>
+  <description>The approximate number of blocks to process per 
+      decommission interval, as defined in dfs.namenode.decommission.interval.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.decommission.max.concurrent.tracked.nodes</name>
+  <value>100</value>
+  <description>
+    The maximum number of decommission-in-progress datanodes that will be
+    tracked at one time by the namenode. Tracking a decommission-in-progress
+    datanode consumes additional NN memory proportional to the number of blocks
+    on the datanode. Having a conservative limit reduces the potential impact
+    of decommissioning a large number of nodes at once.
+
+    A value of 0 means no limit will be enforced.
+  </description>
 </property>
 
 <property>

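The same throttles can be set programmatically; the DFSConfigKeys constants
below are the ones exercised by the updated tests further down, while the
chosen values are only illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class DecomTuningExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Scan fewer blocks per interval on a memory-constrained NameNode
        conf.setInt(
            DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY,
            100000);
        // Track at most 25 decommissioning datanodes at a time (0 = no limit)
        conf.setInt(
            DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES,
            25);
      }
    }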
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1e4dfe2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index 35c0d8c..d285506 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
@@ -26,39 +27,57 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Iterator;
+import java.util.List;
 import java.util.Random;
+import java.util.concurrent.ExecutionException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import com.google.common.base.Supplier;
+import com.google.common.collect.Lists;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DecommissionManager;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
+import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
+import org.junit.Ignore;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class tests the decommissioning of nodes.
  */
 public class TestDecommission {
-  public static final Log LOG = LogFactory.getLog(TestDecommission.class);
+  public static final Logger LOG = LoggerFactory.getLogger(TestDecommission
+      .class);
   static final long seed = 0xDEADBEEFL;
   static final int blockSize = 8192;
   static final int fileSize = 16384;
@@ -90,6 +109,7 @@ public class TestDecommission {
     conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, BLOCKREPORT_INTERVAL_MSEC);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 4);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, NAMENODE_REPLICATION_INTERVAL);
@@ -106,7 +126,7 @@ public class TestDecommission {
     }
   }
   
-  private void writeConfigFile(Path name, ArrayList<String> nodes) 
+  private void writeConfigFile(Path name, List<String> nodes) 
     throws IOException {
     // delete if it already exists
     if (localFileSys.exists(name)) {
@@ -150,7 +170,7 @@ public class TestDecommission {
    * @param downnode - if null, there is no decommissioned node for this file.
    * @return - null if no failure found, else an error message string.
    */
-  private String checkFile(FileSystem fileSys, Path name, int repl,
+  private static String checkFile(FileSystem fileSys, Path name, int repl,
     String downnode, int numDatanodes) throws IOException {
     boolean isNodeDown = (downnode != null);
     // need a raw stream
@@ -262,7 +282,7 @@ public class TestDecommission {
   /* Ask a specific NN to stop decommission of the datanode and wait for each
    * to reach the NORMAL state.
    */
-  private void recomissionNode(int nnIndex, DatanodeInfo decommissionedNode) throws IOException {
+  private void recommissionNode(int nnIndex, DatanodeInfo decommissionedNode) throws IOException {
     LOG.info("Recommissioning node: " + decommissionedNode);
     writeConfigFile(excludeFile, null);
     refreshNodes(cluster.getNamesystem(nnIndex), conf);
@@ -280,7 +300,7 @@ public class TestDecommission {
       LOG.info("Waiting for node " + node + " to change state to "
           + state + " current state: " + node.getAdminState());
       try {
-        Thread.sleep(HEARTBEAT_INTERVAL * 1000);
+        Thread.sleep(HEARTBEAT_INTERVAL * 500);
       } catch (InterruptedException e) {
         // nothing
       }
@@ -322,28 +342,27 @@ public class TestDecommission {
   }
   
   private void verifyStats(NameNode namenode, FSNamesystem fsn,
-      DatanodeInfo node, boolean decommissioning)
+      DatanodeInfo info, DataNode node, boolean decommissioning)
       throws InterruptedException, IOException {
-    // Do the stats check over 10 iterations
+    // Do the stats check over 10 heartbeats
     for (int i = 0; i < 10; i++) {
       long[] newStats = namenode.getRpcServer().getStats();
 
       // For decommissioning nodes, ensure capacity of the DN is no longer
       // counted. Only used space of the DN is counted in cluster capacity
-      assertEquals(newStats[0], decommissioning ? node.getDfsUsed() : 
-        node.getCapacity());
+      assertEquals(newStats[0],
+          decommissioning ? info.getDfsUsed() : info.getCapacity());
 
       // Ensure cluster used capacity is counted for both normal and
       // decommissioning nodes
-      assertEquals(newStats[1], node.getDfsUsed());
+      assertEquals(newStats[1], info.getDfsUsed());
 
       // For decommissioning nodes, remaining space from the DN is not counted
-      assertEquals(newStats[2], decommissioning ? 0 : node.getRemaining());
+      assertEquals(newStats[2], decommissioning ? 0 : info.getRemaining());
 
       // Ensure transceiver count is same as that DN
-      assertEquals(fsn.getTotalLoad(), node.getXceiverCount());
-      
-      Thread.sleep(HEARTBEAT_INTERVAL * 1000); // Sleep heart beat interval
+      assertEquals(fsn.getTotalLoad(), info.getXceiverCount());
+      DataNodeTestUtils.triggerHeartbeat(node);
     }
   }
 
@@ -408,14 +427,6 @@ public class TestDecommission {
   }
   
   /**
-   * Tests recommission for non federated cluster
-   */
-  @Test(timeout=360000)
-  public void testRecommission() throws IOException {
-    testRecommission(1, 6);
-  }
-
-  /**
    * Test decommission for federeated cluster
    */
   @Test(timeout=360000)
@@ -501,12 +512,12 @@ public class TestDecommission {
     //    1. the last DN would have been chosen as excess replica, given its
     //    heartbeat is considered old.
     //    Please refer to BlockPlacementPolicyDefault#chooseReplicaToDelete
-    //    2. After recomissionNode finishes, SBN has 3 live replicas ( 0, 1, 2 )
+    //    2. After recommissionNode finishes, SBN has 3 live replicas ( 0, 1, 2 )
     //    and one excess replica ( 3 )
     // After the fix,
-    //    After recomissionNode finishes, SBN has 4 live replicas ( 0, 1, 2, 3 )
+    //    After recommissionNode finishes, SBN has 4 live replicas ( 0, 1, 2, 3 )
     Thread.sleep(slowHeartbeatDNwaitTime);
-    recomissionNode(1, decomNodeFromSBN);
+    recommissionNode(1, decomNodeFromSBN);
 
     // Step 3.b, ask ANN to recommission the first DN.
     // To verify the fix, the test makes sure the excess replica picked by ANN
@@ -525,7 +536,7 @@ public class TestDecommission {
     cluster.restartDataNode(nextToLastDNprop);
     cluster.waitActive();
     Thread.sleep(slowHeartbeatDNwaitTime);
-    recomissionNode(0, decommissionedNodeFromANN);
+    recommissionNode(0, decommissionedNodeFromANN);
 
     // Step 3.c, make sure the DN has deleted the block and report to NNs
     cluster.triggerHeartbeats();
@@ -607,69 +618,87 @@ public class TestDecommission {
     cluster.shutdown();
   }
 
+  /**
+   * Test that over-replicated blocks are deleted on recommission.
+   */
+  @Test(timeout=120000)
+  public void testRecommission() throws Exception {
+    final int numDatanodes = 6;
+    try {
+      LOG.info("Starting test testRecommission");
 
-  private void testRecommission(int numNamenodes, int numDatanodes) 
-    throws IOException {
-    LOG.info("Starting test testRecommission");
+      startCluster(1, numDatanodes, conf);
 
-    startCluster(numNamenodes, numDatanodes, conf);
-  
-    ArrayList<ArrayList<DatanodeInfo>> namenodeDecomList = 
-      new ArrayList<ArrayList<DatanodeInfo>>(numNamenodes);
-    for(int i = 0; i < numNamenodes; i++) {
-      namenodeDecomList.add(i, new ArrayList<DatanodeInfo>(numDatanodes));
-    }
-    Path file1 = new Path("testDecommission.dat");
-    int replicas = numDatanodes - 1;
-      
-    for (int i = 0; i < numNamenodes; i++) {
-      ArrayList<DatanodeInfo> decommissionedNodes = namenodeDecomList.get(i);
-      FileSystem fileSys = cluster.getFileSystem(i);
+      final Path file1 = new Path("testDecommission.dat");
+      final int replicas = numDatanodes - 1;
+
+      ArrayList<DatanodeInfo> decommissionedNodes = Lists.newArrayList();
+      final FileSystem fileSys = cluster.getFileSystem();
+
+      // Write a file to n-1 datanodes
       writeFile(fileSys, file1, replicas);
-        
-      // Decommission one node. Verify that node is decommissioned.
-      DatanodeInfo decomNode = decommissionNode(i, null, decommissionedNodes,
-          AdminStates.DECOMMISSIONED);
+
+      // Decommission one of the datanodes with a replica
+      BlockLocation loc = fileSys.getFileBlockLocations(file1, 0, 1)[0];
+      assertEquals("Unexpected number of replicas from getFileBlockLocations",
+          replicas, loc.getHosts().length);
+      final String toDecomHost = loc.getNames()[0];
+      String toDecomUuid = null;
+      for (DataNode d : cluster.getDataNodes()) {
+        if (d.getDatanodeId().getXferAddr().equals(toDecomHost)) {
+          toDecomUuid = d.getDatanodeId().getDatanodeUuid();
+          break;
+        }
+      }
+      assertNotNull("Could not find a dn with the block!", toDecomUuid);
+      final DatanodeInfo decomNode =
+          decommissionNode(0, toDecomUuid, decommissionedNodes,
+              AdminStates.DECOMMISSIONED);
       decommissionedNodes.add(decomNode);
-        
+      final BlockManager blockManager =
+          cluster.getNamesystem().getBlockManager();
+      final DatanodeManager datanodeManager =
+          blockManager.getDatanodeManager();
+      BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
+
       // Ensure decommissioned datanode is not automatically shutdown
-      DFSClient client = getDfsClient(cluster.getNameNode(i), conf);
-      assertEquals("All datanodes must be alive", numDatanodes, 
+      DFSClient client = getDfsClient(cluster.getNameNode(), conf);
+      assertEquals("All datanodes must be alive", numDatanodes,
           client.datanodeReport(DatanodeReportType.LIVE).length);
-      int tries =0;
+
       // wait for the block to be replicated
-      while (tries++ < 20) {
-        try {
-          Thread.sleep(1000);
-          if (checkFile(fileSys, file1, replicas, decomNode.getXferAddr(),
-              numDatanodes) == null) {
-            break;
-          }
-        } catch (InterruptedException ie) {
-        }
-      }
-      assertTrue("Checked if block was replicated after decommission, tried "
-          + tries + " times.", tries < 20);
-
-      // stop decommission and check if the new replicas are removed
-      recomissionNode(0, decomNode);
-      // wait for the block to be deleted
-      tries = 0;
-      while (tries++ < 20) {
-        try {
-          Thread.sleep(1000);
-          if (checkFile(fileSys, file1, replicas, null, numDatanodes) == null) {
-            break;
+      final ExtendedBlock b = DFSTestUtil.getFirstBlock(fileSys, file1);
+      final String uuid = toDecomUuid;
+      GenericTestUtils.waitFor(new Supplier<Boolean>() {
+        @Override
+        public Boolean get() {
+          BlockInfo info = blockManager.getStoredBlock(b.getLocalBlock());
+          int count = 0;
+          StringBuilder sb = new StringBuilder("Replica locations: ");
+          for (int i = 0; i < info.numNodes(); i++) {
+            DatanodeDescriptor dn = info.getDatanode(i);
+            sb.append(dn + ", ");
+            if (!dn.getDatanodeUuid().equals(uuid)) {
+              count++;
+            }
           }
-        } catch (InterruptedException ie) {
+          LOG.info(sb.toString());
+          LOG.info("Count: " + count);
+          return count == replicas;
         }
-      }
+      }, 500, 30000);
+
+      // redecommission and wait for over-replication to be fixed
+      recommissionNode(0, decomNode);
+      BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
+      DFSTestUtil.waitForReplication(cluster, b, 1, replicas, 0);
+
       cleanupFile(fileSys, file1);
-      assertTrue("Checked if node was recommissioned " + tries + " times.",
-         tries < 20);
-      LOG.info("tried: " + tries + " times before recommissioned");
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
     }
-    cluster.shutdown();
   }
   
   /**
@@ -703,20 +732,35 @@ public class TestDecommission {
       
       FSNamesystem fsn = cluster.getNamesystem(i);
       NameNode namenode = cluster.getNameNode(i);
-      DatanodeInfo downnode = decommissionNode(i, null, null,
+      
+      DatanodeInfo decomInfo = decommissionNode(i, null, null,
           AdminStates.DECOMMISSION_INPROGRESS);
+      DataNode decomNode = getDataNode(decomInfo);
       // Check namenode stats for multiple datanode heartbeats
-      verifyStats(namenode, fsn, downnode, true);
+      verifyStats(namenode, fsn, decomInfo, decomNode, true);
       
       // Stop decommissioning and verify stats
       writeConfigFile(excludeFile, null);
       refreshNodes(fsn, conf);
-      DatanodeInfo ret = NameNodeAdapter.getDatanode(fsn, downnode);
-      waitNodeState(ret, AdminStates.NORMAL);
-      verifyStats(namenode, fsn, ret, false);
+      DatanodeInfo retInfo = NameNodeAdapter.getDatanode(fsn, decomInfo);
+      DataNode retNode = getDataNode(decomInfo);
+      waitNodeState(retInfo, AdminStates.NORMAL);
+      verifyStats(namenode, fsn, retInfo, retNode, false);
     }
   }
-  
+
+  private DataNode getDataNode(DatanodeInfo decomInfo) {
+    DataNode decomNode = null;
+    for (DataNode dn: cluster.getDataNodes()) {
+      if (decomInfo.equals(dn.getDatanodeId())) {
+        decomNode = dn;
+        break;
+      }
+    }
+    assertNotNull("Could not find decomNode in cluster!", decomNode);
+    return decomNode;
+  }
+
   /**
    * Test host/include file functionality. Only datanodes
    * in the include file are allowed to connect to the namenode in a non
@@ -902,9 +946,9 @@ public class TestDecommission {
    * It is not recommended to use a registration name which is not also a
    * valid DNS hostname for the DataNode.  See HDFS-5237 for background.
    */
+  @Ignore
   @Test(timeout=360000)
-  public void testIncludeByRegistrationName() throws IOException,
-      InterruptedException {
+  public void testIncludeByRegistrationName() throws Exception {
     Configuration hdfsConf = new Configuration(conf);
     // Any IPv4 address starting with 127 functions as a "loopback" address
     // which is connected to the current host.  So by choosing 127.0.0.100
@@ -927,15 +971,22 @@ public class TestDecommission {
     refreshNodes(cluster.getNamesystem(0), hdfsConf);
 
     // Wait for the DN to be marked dead.
-    DFSClient client = getDfsClient(cluster.getNameNode(0), hdfsConf);
-    while (true) {
-      DatanodeInfo info[] = client.datanodeReport(DatanodeReportType.DEAD);
-      if (info.length == 1) {
-        break;
+    LOG.info("Waiting for DN to be marked as dead.");
+    final DFSClient client = getDfsClient(cluster.getNameNode(0), hdfsConf);
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        BlockManagerTestUtil
+            .checkHeartbeat(cluster.getNamesystem().getBlockManager());
+        try {
+          DatanodeInfo info[] = client.datanodeReport(DatanodeReportType.DEAD);
+          return info.length == 1;
+        } catch (IOException e) {
+          LOG.warn("Failed to check dead DNs", e);
+          return false;
+        }
       }
-      LOG.info("Waiting for datanode to be marked dead");
-      Thread.sleep(HEARTBEAT_INTERVAL * 1000);
-    }
+    }, 500, 5000);
 
     // Use a non-empty include file with our registration name.
     // It should work.
@@ -945,18 +996,169 @@ public class TestDecommission {
     writeConfigFile(hostsFile,  nodes);
     refreshNodes(cluster.getNamesystem(0), hdfsConf);
     cluster.restartDataNode(0);
+    cluster.triggerHeartbeats();
 
     // Wait for the DN to come back.
-    while (true) {
-      DatanodeInfo info[] = client.datanodeReport(DatanodeReportType.LIVE);
-      if (info.length == 1) {
-        Assert.assertFalse(info[0].isDecommissioned());
-        Assert.assertFalse(info[0].isDecommissionInProgress());
-        assertEquals(registrationName, info[0].getHostName());
-        break;
+    LOG.info("Waiting for DN to come back.");
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        BlockManagerTestUtil
+            .checkHeartbeat(cluster.getNamesystem().getBlockManager());
+        try {
+          DatanodeInfo info[] = client.datanodeReport(DatanodeReportType.LIVE);
+          if (info.length == 1) {
+            Assert.assertFalse(info[0].isDecommissioned());
+            Assert.assertFalse(info[0].isDecommissionInProgress());
+            assertEquals(registrationName, info[0].getHostName());
+            return true;
+          }
+        } catch (IOException e) {
+          LOG.warn("Failed to check dead DNs", e);
+        }
+        return false;
       }
-      LOG.info("Waiting for datanode to come back");
-      Thread.sleep(HEARTBEAT_INTERVAL * 1000);
+    }, 500, 5000);
+  }
+  
+  @Test(timeout=120000)
+  public void testBlocksPerInterval() throws Exception {
+    Configuration newConf = new Configuration(conf);
+    org.apache.log4j.Logger.getLogger(DecommissionManager.class)
+        .setLevel(Level.TRACE);
+    // Turn the blocks per interval way down
+    newConf.setInt(
+        DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY,
+        3);
+    // Disable the normal monitor runs
+    newConf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY,
+        Integer.MAX_VALUE);
+    startCluster(1, 3, newConf);
+    final FileSystem fs = cluster.getFileSystem();
+    final DatanodeManager datanodeManager =
+        cluster.getNamesystem().getBlockManager().getDatanodeManager();
+    final DecommissionManager decomManager = datanodeManager.getDecomManager();
+
+    // Write a 3 block file, so each node has one block. Should scan 3 nodes.
+    DFSTestUtil.createFile(fs, new Path("/file1"), 64, (short) 3, 0xBAD1DEA);
+    doDecomCheck(datanodeManager, decomManager, 3);
+    // Write another file, should only scan two
+    DFSTestUtil.createFile(fs, new Path("/file2"), 64, (short)3, 0xBAD1DEA);
+    doDecomCheck(datanodeManager, decomManager, 2);
+    // One more file, should only scan 1
+    DFSTestUtil.createFile(fs, new Path("/file3"), 64, (short)3, 0xBAD1DEA);
+    doDecomCheck(datanodeManager, decomManager, 1);
+    // blocks on each DN now exceeds limit, still scan at least one node
+    DFSTestUtil.createFile(fs, new Path("/file4"), 64, (short)3, 0xBAD1DEA);
+    doDecomCheck(datanodeManager, decomManager, 1);
+  }
+
+  @Deprecated
+  @Test(timeout=120000)
+  public void testNodesPerInterval() throws Exception {
+    Configuration newConf = new Configuration(conf);
+    org.apache.log4j.Logger.getLogger(DecommissionManager.class)
+        .setLevel(Level.TRACE);
+    // Set the deprecated configuration key which limits the # of nodes per 
+    // interval
+    newConf.setInt("dfs.namenode.decommission.nodes.per.interval", 1);
+    // Disable the normal monitor runs
+    newConf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY,
+        Integer.MAX_VALUE);
+    startCluster(1, 3, newConf);
+    final FileSystem fs = cluster.getFileSystem();
+    final DatanodeManager datanodeManager =
+        cluster.getNamesystem().getBlockManager().getDatanodeManager();
+    final DecommissionManager decomManager = datanodeManager.getDecomManager();
+
+    // Write a 3 block file, so each node has one block. Should scan 1 node 
+    // each time.
+    DFSTestUtil.createFile(fs, new Path("/file1"), 64, (short) 3, 0xBAD1DEA);
+    for (int i=0; i<3; i++) {
+      doDecomCheck(datanodeManager, decomManager, 1);
     }
   }
+
+  private void doDecomCheck(DatanodeManager datanodeManager,
+      DecommissionManager decomManager, int expectedNumCheckedNodes)
+      throws IOException, ExecutionException, InterruptedException {
+    // Decom all nodes
+    ArrayList<DatanodeInfo> decommissionedNodes = Lists.newArrayList();
+    for (DataNode d: cluster.getDataNodes()) {
+      DatanodeInfo dn = decommissionNode(0, d.getDatanodeUuid(),
+          decommissionedNodes,
+          AdminStates.DECOMMISSION_INPROGRESS);
+      decommissionedNodes.add(dn);
+    }
+    // Run decom scan and check
+    BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
+    assertEquals("Unexpected # of nodes checked", expectedNumCheckedNodes, 
+        decomManager.getNumNodesChecked());
+    // Recommission all nodes
+    for (DatanodeInfo dn : decommissionedNodes) {
+      recommissionNode(0, dn);
+    }
+  }
+
+  @Test(timeout=120000)
+  public void testPendingNodes() throws Exception {
+    Configuration newConf = new Configuration(conf);
+    org.apache.log4j.Logger.getLogger(DecommissionManager.class)
+        .setLevel(Level.TRACE);
+    // Only allow one node to be decom'd at a time
+    newConf.setInt(
+        DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES,
+        1);
+    // Disable the normal monitor runs
+    newConf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 
+        Integer.MAX_VALUE);
+    startCluster(1, 3, newConf);
+    final FileSystem fs = cluster.getFileSystem();
+    final DatanodeManager datanodeManager =
+        cluster.getNamesystem().getBlockManager().getDatanodeManager();
+    final DecommissionManager decomManager = datanodeManager.getDecomManager();
+
+    // Keep a file open to prevent decom from progressing
+    HdfsDataOutputStream open1 =
+        (HdfsDataOutputStream) fs.create(new Path("/openFile1"), (short)3);
+    // Flush and trigger block reports so the block definitely shows up on NN
+    open1.write(123);
+    open1.hflush();
+    for (DataNode d: cluster.getDataNodes()) {
+      DataNodeTestUtils.triggerBlockReport(d);
+    }
+    // Decom two nodes, so one is still alive
+    ArrayList<DatanodeInfo> decommissionedNodes = Lists.newArrayList();
+    for (int i=0; i<2; i++) {
+      final DataNode d = cluster.getDataNodes().get(i);
+      DatanodeInfo dn = decommissionNode(0, d.getDatanodeUuid(), 
+          decommissionedNodes, 
+          AdminStates.DECOMMISSION_INPROGRESS);
+      decommissionedNodes.add(dn);
+    }
+
+    for (int i=2; i>=0; i--) {
+      assertTrackedAndPending(decomManager, 0, i);
+      BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
+    }
+
+    // Close file, try to decom the last node, should get stuck in tracked
+    open1.close();
+    final DataNode d = cluster.getDataNodes().get(2);
+    DatanodeInfo dn = decommissionNode(0, d.getDatanodeUuid(),
+        decommissionedNodes,
+        AdminStates.DECOMMISSION_INPROGRESS);
+    decommissionedNodes.add(dn);
+    BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
+    
+    assertTrackedAndPending(decomManager, 1, 0);
+  }
+
+  private void assertTrackedAndPending(DecommissionManager decomManager,
+      int tracked, int pending) {
+    assertEquals("Unexpected number of tracked nodes", tracked,
+        decomManager.getNumTrackedNodes());
+    assertEquals("Unexpected number of pending nodes", pending,
+        decomManager.getNumPendingNodes());
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1e4dfe2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
index fccd308..f61176e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
@@ -22,6 +22,7 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.Set;
+import java.util.concurrent.ExecutionException;
 
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -300,9 +301,8 @@ public class BlockManagerTestUtil {
    * Have DatanodeManager check decommission state.
    * @param dm the DatanodeManager to manipulate
    */
-  public static void checkDecommissionState(DatanodeManager dm,
-      DatanodeDescriptor node) {
-    dm.checkDecommissionState(node);
+  public static void recheckDecommissionState(DatanodeManager dm)
+      throws ExecutionException, InterruptedException {
+    dm.getDecomManager().runMonitor();
   }
-
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1e4dfe2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
index d9066e8..d514768 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
@@ -137,7 +137,7 @@ public class TestReplicationPolicyConsiderLoad {
       // returns false
       for (int i = 0; i < 3; i++) {
         DatanodeDescriptor d = dnManager.getDatanode(dnrList.get(i));
-        dnManager.startDecommission(d);
+        dnManager.getDecomManager().startDecommission(d);
         d.setDecommissioned();
       }
       assertEquals((double)load/3, dnManager.getFSClusterStats()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1e4dfe2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
index a9aba86..789ee6f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
@@ -29,7 +28,6 @@ import java.util.Arrays;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Random;
-import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.io.output.ByteArrayOutputStream;
 import org.apache.hadoop.conf.Configuration;
@@ -53,7 +51,12 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DecommissionManager;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -90,7 +93,8 @@ public class TestDecommissioningStatus {
     conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
     Path includeFile = new Path(dir, "include");
     conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 
+        1000);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
         4);
@@ -104,6 +108,9 @@ public class TestDecommissioningStatus {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
     cluster.waitActive();
     fileSys = cluster.getFileSystem();
+    cluster.getNamesystem().getBlockManager().getDatanodeManager()
+        .setHeartbeatExpireInterval(3000);
+    Logger.getLogger(DecommissionManager.class).setLevel(Level.DEBUG);
   }
 
   @AfterClass
@@ -186,13 +193,16 @@ public class TestDecommissioningStatus {
   private void checkDecommissionStatus(DatanodeDescriptor decommNode,
       int expectedUnderRep, int expectedDecommissionOnly,
       int expectedUnderRepInOpenFiles) {
-    assertEquals(decommNode.decommissioningStatus.getUnderReplicatedBlocks(),
-        expectedUnderRep);
+    assertEquals("Unexpected num under-replicated blocks",
+        expectedUnderRep,
+        decommNode.decommissioningStatus.getUnderReplicatedBlocks());
+    assertEquals("Unexpected number of decom-only replicas",
+        expectedDecommissionOnly,
+        decommNode.decommissioningStatus.getDecommissionOnlyReplicas());
     assertEquals(
-        decommNode.decommissioningStatus.getDecommissionOnlyReplicas(),
-        expectedDecommissionOnly);
-    assertEquals(decommNode.decommissioningStatus
-        .getUnderReplicatedInOpenFiles(), expectedUnderRepInOpenFiles);
+        "Unexpected number of replicas in under-replicated open files",
+        expectedUnderRepInOpenFiles,
+        decommNode.decommissioningStatus.getUnderReplicatedInOpenFiles());
   }
 
   private void checkDFSAdminDecommissionStatus(
@@ -244,7 +254,7 @@ public class TestDecommissioningStatus {
    * Tests Decommissioning Status in DFS.
    */
   @Test
-  public void testDecommissionStatus() throws IOException, InterruptedException {
+  public void testDecommissionStatus() throws Exception {
     InetSocketAddress addr = new InetSocketAddress("localhost", cluster
         .getNameNodePort());
     DFSClient client = new DFSClient(addr, conf);
@@ -253,7 +263,7 @@ public class TestDecommissioningStatus {
     DistributedFileSystem fileSys = cluster.getFileSystem();
     DFSAdmin admin = new DFSAdmin(cluster.getConfiguration(0));
 
-    short replicas = 2;
+    short replicas = numDatanodes;
     //
     // Decommission one node. Verify the decommission status
     // 
@@ -263,7 +273,9 @@ public class TestDecommissioningStatus {
 
     Path file2 = new Path("decommission1.dat");
     FSDataOutputStream st1 = writeIncompleteFile(fileSys, file2, replicas);
-    Thread.sleep(5000);
+    for (DataNode d: cluster.getDataNodes()) {
+      DataNodeTestUtils.triggerBlockReport(d);
+    }
 
     FSNamesystem fsn = cluster.getNamesystem();
     final DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();
@@ -271,19 +283,22 @@ public class TestDecommissioningStatus {
       String downnode = decommissionNode(fsn, client, localFileSys, iteration);
       dm.refreshNodes(conf);
       decommissionedNodes.add(downnode);
-      Thread.sleep(5000);
+      BlockManagerTestUtil.recheckDecommissionState(dm);
       final List<DatanodeDescriptor> decommissioningNodes = dm.getDecommissioningNodes();
       if (iteration == 0) {
         assertEquals(decommissioningNodes.size(), 1);
         DatanodeDescriptor decommNode = decommissioningNodes.get(0);
-        checkDecommissionStatus(decommNode, 4, 0, 2);
+        checkDecommissionStatus(decommNode, 3, 0, 1);
         checkDFSAdminDecommissionStatus(decommissioningNodes.subList(0, 1),
             fileSys, admin);
       } else {
         assertEquals(decommissioningNodes.size(), 2);
         DatanodeDescriptor decommNode1 = decommissioningNodes.get(0);
         DatanodeDescriptor decommNode2 = decommissioningNodes.get(1);
-        checkDecommissionStatus(decommNode1, 4, 4, 2);
+        // This one is still 3,3,1 since it passed over the UC block 
+        // earlier, before node 2 was decommed
+        checkDecommissionStatus(decommNode1, 3, 3, 1);
+        // This one is 4,4,2 since it has the full state
         checkDecommissionStatus(decommNode2, 4, 4, 2);
         checkDFSAdminDecommissionStatus(decommissioningNodes.subList(0, 2),
             fileSys, admin);
@@ -305,8 +320,7 @@ public class TestDecommissioningStatus {
    * the replication process after it rejoins the cluster.
    */
   @Test(timeout=120000)
-  public void testDecommissionStatusAfterDNRestart()
-      throws IOException, InterruptedException {
+  public void testDecommissionStatusAfterDNRestart() throws Exception {
     DistributedFileSystem fileSys =
         (DistributedFileSystem)cluster.getFileSystem();
 
@@ -345,7 +359,7 @@ public class TestDecommissioningStatus {
     BlockManagerTestUtil.checkHeartbeat(fsn.getBlockManager());
 
     // Force DatanodeManager to check decommission state.
-    BlockManagerTestUtil.checkDecommissionState(dm, dead.get(0));
+    BlockManagerTestUtil.recheckDecommissionState(dm);
 
     // Verify that the DN remains in DECOMMISSION_INPROGRESS state.
     assertTrue("the node should be DECOMMISSION_IN_PROGRESSS",
@@ -359,7 +373,7 @@ public class TestDecommissioningStatus {
     // Delete the under-replicated file, which should let the 
     // DECOMMISSION_IN_PROGRESS node become DECOMMISSIONED
     cleanupFile(fileSys, f);
-    BlockManagerTestUtil.checkDecommissionState(dm, dead.get(0));
+    BlockManagerTestUtil.recheckDecommissionState(dm);
     assertTrue("the node should be decommissioned",
         dead.get(0).isDecommissioned());
 
@@ -380,8 +394,9 @@ public class TestDecommissioningStatus {
    * DECOMMISSIONED
    */
   @Test(timeout=120000)
-  public void testDecommissionDeadDN()
-      throws IOException, InterruptedException, TimeoutException {
+  public void testDecommissionDeadDN() throws Exception {
+    Logger log = Logger.getLogger(DecommissionManager.class);
+    log.setLevel(Level.DEBUG);
     DatanodeID dnID = cluster.getDataNodes().get(0).getDatanodeId();
     String dnName = dnID.getXferAddr();
     DataNodeProperties stoppedDN = cluster.stopDataNode(0);
@@ -392,7 +407,7 @@ public class TestDecommissioningStatus {
     DatanodeDescriptor dnDescriptor = dm.getDatanode(dnID);
     decommissionNode(fsn, localFileSys, dnName);
     dm.refreshNodes(conf);
-    BlockManagerTestUtil.checkDecommissionState(dm, dnDescriptor);
+    BlockManagerTestUtil.recheckDecommissionState(dm);
     assertTrue(dnDescriptor.isDecommissioned());
 
     // Add the node back

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1e4dfe2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 0b23b84..cddc457 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -1305,7 +1305,7 @@ public class TestFsck {
           .getBlockManager().getBlockCollection(eb.getLocalBlock())
           .getBlocks()[0].getDatanode(0);
       cluster.getNameNode().getNamesystem().getBlockManager()
-          .getDatanodeManager().startDecommission(dn);
+          .getDatanodeManager().getDecomManager().startDecommission(dn);
       String dnName = dn.getXferAddr();
 
       //wait for decommission start

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1e4dfe2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
index 426563b..35a611b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
@@ -30,8 +30,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DF;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSOutputStream;
@@ -240,7 +238,7 @@ public class TestNamenodeCapacityReport {
         DatanodeDescriptor dnd =
             dnm.getDatanode(datanodes.get(i).getDatanodeId());
         expectedInServiceLoad -= dnd.getXceiverCount();
-        dnm.startDecommission(dnd);
+        dnm.getDecomManager().startDecommission(dnd);
         DataNodeTestUtils.triggerHeartbeat(datanodes.get(i));
         Thread.sleep(100);
         checkClusterHealth(nodes, namesystem, expectedTotalLoad, expectedInServiceNodes, expectedInServiceLoad);


[40/50] [abbrv] hadoop git commit: YARN-3296. Mark ResourceCalculatorProcessTree class as Public for configurable resource monitoring. Contributed by Hitesh Shah

Posted by ji...@apache.org.
YARN-3296. Mark ResourceCalculatorProcessTree class as Public for configurable resource monitoring. Contributed by Hitesh Shah


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b912239
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b912239
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b912239

Branch: refs/heads/HDFS-7285
Commit: 7b912239d7590aff2dbd3e7e5f5f7c2bfdd23e3d
Parents: eed1645
Author: Junping Du <ju...@apache.org>
Authored: Sun Mar 8 14:47:35 2015 -0700
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:27 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 +++
 .../util/ResourceCalculatorProcessTree.java     | 25 ++++++++++++++------
 2 files changed, 21 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b912239/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 250fc1c..f28e932 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -366,6 +366,9 @@ Release 2.7.0 - UNRELEASED
     YARN-2190. Added CPU and memory limit options to the default container
     executor for Windows containers. (Chuan Liu via jianhe)
 
+    YARN-3296. Mark ResourceCalculatorProcessTree class as Public for configurable
+    resource monitoring. (Hitesh Shah via junping_du)
+
   OPTIMIZATIONS
 
     YARN-2990. FairScheduler's delay-scheduling always waits for node-local and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b912239/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
index 8c22c9e..6ee8834 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
@@ -22,7 +22,8 @@ import java.lang.reflect.Constructor;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 
@@ -30,7 +31,8 @@ import org.apache.hadoop.conf.Configured;
  * Interface class to obtain process resource usage
  *
  */
-@Private
+@Public
+@Evolving
 public abstract class ResourceCalculatorProcessTree extends Configured {
   static final Log LOG = LogFactory
       .getLog(ResourceCalculatorProcessTree.class);
@@ -90,9 +92,12 @@ public abstract class ResourceCalculatorProcessTree extends Configured {
    * @param olderThanAge processes above this age are included in the
    *                      memory addition
    * @return cumulative virtual memory used by the process-tree in bytes,
-   *          for processes older than this age.
+   *          for processes older than this age. return 0 if it cannot be
+   *          calculated
    */
-  public abstract long getCumulativeVmem(int olderThanAge);
+  public long getCumulativeVmem(int olderThanAge) {
+    return 0;
+  }
 
   /**
    * Get the cumulative resident set size (rss) memory used by all the processes
@@ -104,7 +109,9 @@ public abstract class ResourceCalculatorProcessTree extends Configured {
    *          for processes older than this age. return 0 if it cannot be
    *          calculated
    */
-  public abstract long getCumulativeRssmem(int olderThanAge);
+  public long getCumulativeRssmem(int olderThanAge) {
+    return 0;
+  }
 
   /**
    * Get the CPU time in millisecond used by all the processes in the
@@ -113,7 +120,9 @@ public abstract class ResourceCalculatorProcessTree extends Configured {
    * @return cumulative CPU time in millisecond since the process-tree created
    *         return 0 if it cannot be calculated
    */
-  public abstract long getCumulativeCpuTime();
+  public long getCumulativeCpuTime() {
+    return 0;
+  }
 
   /**
    * Get the CPU usage by all the processes in the process-tree based on
@@ -123,7 +132,9 @@ public abstract class ResourceCalculatorProcessTree extends Configured {
    * @return percentage CPU usage since the process-tree was created
    *         return {@link CpuTimeTracker#UNAVAILABLE} if it cannot be calculated
    */
-  public abstract float getCpuUsagePercent();
+  public float getCpuUsagePercent() {
+    return -1;
+  }
 
   /** Verify that the tree process id is same as its process group id.
    * @return true if the process id matches else return false.
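
With the class public and its accessors no longer abstract, a monitoring
framework can plug in its own process tree and override only the metrics it
can actually supply; everything else falls back to the new defaults (0, or
-1 for CPU percentage). A minimal sketch, assuming the remaining abstract
members are updateProcessTree(), getProcessTreeDump() and
checkPidPgrpidForMatch(), and that the base class keeps a (String pid)
constructor (neither appears in this diff):

    import org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree;

    /** Reports only CPU time; memory queries use the inherited defaults. */
    public class CpuOnlyProcessTree extends ResourceCalculatorProcessTree {

      public CpuOnlyProcessTree(String pid) {
        super(pid);   // assumption: the base constructor takes the root pid
      }

      @Override
      public void updateProcessTree() {
        // refresh accounting from an external metrics agent here
      }

      @Override
      public String getProcessTreeDump() {
        return "";    // no per-process dump available
      }

      @Override
      public long getCumulativeCpuTime() {
        return readCpuMillis();   // hypothetical helper, placeholder below
      }

      @Override
      public boolean checkPidPgrpidForMatch() {
        return true;  // trust the external agent's process grouping
      }

      private long readCpuMillis() {
        return 0L;    // stand-in for a real metrics source
      }
    }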


[02/50] [abbrv] hadoop git commit: HADOOP-11183. Memory-based S3AOutputstream. (Thomas Demoor via stevel)

Posted by ji...@apache.org.
HADOOP-11183. Memory-based S3AOutputstream. (Thomas Demoor via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/24478c0a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/24478c0a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/24478c0a

Branch: refs/heads/HDFS-7285
Commit: 24478c0a40fe769d56daac52b8413b187dea8df2
Parents: 4006739
Author: Steve Loughran <st...@apache.org>
Authored: Tue Mar 3 16:18:39 2015 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:22 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |   2 +
 .../src/main/resources/core-default.xml         |  20 +-
 .../org/apache/hadoop/fs/s3a/Constants.java     |   8 +
 .../hadoop/fs/s3a/S3AFastOutputStream.java      | 413 +++++++++++++++++++
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  24 +-
 .../src/site/markdown/tools/hadoop-aws/index.md |  46 ++-
 .../hadoop/fs/s3a/TestS3AFastOutputStream.java  |  74 ++++
 7 files changed, 570 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/24478c0a/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 11785f2..cb5cd4d 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -667,6 +667,8 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11620. Add support for load balancing across a group of KMS for HA.
     (Arun Suresh via wang)
 
+    HADOOP-11183. Memory-based S3AOutputstream. (Thomas Demoor via stevel)
+
   BUG FIXES
 
     HADOOP-11512. Use getTrimmedStrings when reading serialization keys

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24478c0a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 80dd15b..74390d8 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -763,13 +763,13 @@ for ldap providers in the same way as above does.
 <property>
   <name>fs.s3a.connection.establish.timeout</name>
   <value>5000</value>
-  <description>Socket connection setup timeout in seconds.</description>
+  <description>Socket connection setup timeout in milliseconds.</description>
 </property>
 
 <property>
   <name>fs.s3a.connection.timeout</name>
   <value>50000</value>
-  <description>Socket connection timeout in seconds.</description>
+  <description>Socket connection timeout in milliseconds.</description>
 </property>
 
 <property>
@@ -846,6 +846,22 @@ for ldap providers in the same way as above does.
 </property>
 
 <property>
+  <name>fs.s3a.fast.upload</name>
+  <value>false</value>
+  <description>Upload directly from memory instead of buffering to
+    disk first. Memory usage and parallelism can be controlled: up to
+    fs.s3a.multipart.size bytes are consumed for each (part)upload that is
+    actively uploading (fs.s3a.threads.max) or queued (fs.s3a.max.total.tasks).</description>
+</property>
+
+  <property>
+  <name>fs.s3a.fast.buffer.size</name>
+  <value>1048576</value>
+  <description>Size of initial memory buffer in bytes allocated for an
+    upload. No effect if fs.s3a.fast.upload is false.</description>
+</property>
+
+<property>
   <name>fs.s3a.impl</name>
   <value>org.apache.hadoop.fs.s3a.S3AFileSystem</value>
   <description>The implementation class of the S3A Filesystem</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24478c0a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
index 1d4f67b..e7462dc 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
@@ -83,6 +83,14 @@ public class Constants {
   // comma separated list of directories
   public static final String BUFFER_DIR = "fs.s3a.buffer.dir";
 
+  // should we upload directly from memory rather than using a file buffer
+  public static final String FAST_UPLOAD = "fs.s3a.fast.upload";
+  public static final boolean DEFAULT_FAST_UPLOAD = false;
+
+  //initial size of memory buffer for a fast upload
+  public static final String FAST_BUFFER_SIZE = "fs.s3a.fast.buffer.size";
+  public static final int DEFAULT_FAST_BUFFER_SIZE = 1048576; //1MB
+
   // private | public-read | public-read-write | authenticated-read | 
   // log-delivery-write | bucket-owner-read | bucket-owner-full-control
   public static final String CANNED_ACL = "fs.s3a.acl.default";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24478c0a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
new file mode 100644
index 0000000..a29c47b
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
@@ -0,0 +1,413 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a;
+
+import com.amazonaws.AmazonClientException;
+import com.amazonaws.AmazonServiceException;
+import com.amazonaws.event.ProgressEvent;
+import com.amazonaws.event.ProgressListener;
+import com.amazonaws.services.s3.AmazonS3Client;
+import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
+import com.amazonaws.services.s3.model.CannedAccessControlList;
+import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
+import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
+import com.amazonaws.services.s3.model.ObjectMetadata;
+import com.amazonaws.services.s3.model.PartETag;
+import com.amazonaws.services.s3.model.PutObjectRequest;
+import com.amazonaws.services.s3.model.PutObjectResult;
+import com.amazonaws.services.s3.model.UploadPartRequest;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.util.Progressable;
+import org.slf4j.Logger;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.List;
+
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ThreadPoolExecutor;
+
+
+/**
+ * Upload files/parts directly from a memory buffer, as soon as possible
+ * (instead of buffering to a file).
+ * <p/>
+ * Uploads are managed low-level rather than through the AWS TransferManager.
+ * This allows for uploading each part of a multi-part upload as soon as
+ * the bytes are in memory, rather than waiting until the file is closed.
+ * <p/>
+ * Unstable: statistics and error handling might evolve
+ */
+@InterfaceStability.Unstable
+public class S3AFastOutputStream extends OutputStream {
+
+  private static final Logger LOG = S3AFileSystem.LOG;
+  private final String key;
+  private final String bucket;
+  private final AmazonS3Client client;
+  private final int partSize;
+  private final int multiPartThreshold;
+  private final S3AFileSystem fs;
+  private final CannedAccessControlList cannedACL;
+  private final FileSystem.Statistics statistics;
+  private final String serverSideEncryptionAlgorithm;
+  private final ProgressListener progressListener;
+  private final ListeningExecutorService executorService;
+  private MultiPartUpload multiPartUpload;
+  private boolean closed;
+  private ByteArrayOutputStream buffer;
+  private int bufferLimit;
+
+
+  /**
+   * Creates a fast OutputStream that uploads to S3 from memory.
+   * For MultiPartUploads, as soon as sufficient bytes have been written to
+   * the stream a part is uploaded immediately (by using the low-level
+   * multi-part upload API on the AmazonS3Client).
+   *
+   * @param client AmazonS3Client used for S3 calls
+   * @param fs S3AFilesystem
+   * @param bucket S3 bucket name
+   * @param key S3 key name
+   * @param progress report progress in order to prevent timeouts
+   * @param statistics track FileSystem.Statistics on the performed operations
+   * @param cannedACL used CannedAccessControlList
+   * @param serverSideEncryptionAlgorithm algorithm for server side encryption
+   * @param partSize size of a single part in a multi-part upload (except
+   * last part)
+   * @param multiPartThreshold files at least this size use multi-part upload
+   * @throws IOException
+   */
+  public S3AFastOutputStream(AmazonS3Client client, S3AFileSystem fs,
+      String bucket, String key, Progressable progress,
+      FileSystem.Statistics statistics, CannedAccessControlList cannedACL,
+      String serverSideEncryptionAlgorithm, long partSize,
+      long multiPartThreshold, ThreadPoolExecutor threadPoolExecutor)
+      throws IOException {
+    this.bucket = bucket;
+    this.key = key;
+    this.client = client;
+    this.fs = fs;
+    this.cannedACL = cannedACL;
+    this.statistics = statistics;
+    this.serverSideEncryptionAlgorithm = serverSideEncryptionAlgorithm;
+    //Ensure limit as ByteArrayOutputStream size cannot exceed Integer.MAX_VALUE
+    if (partSize > Integer.MAX_VALUE) {
+      this.partSize = Integer.MAX_VALUE;
+      LOG.warn("s3a: MULTIPART_SIZE capped to ~2.14GB (maximum allowed size " +
+          "when using 'FAST_UPLOAD = true')");
+    } else {
+      this.partSize = (int) partSize;
+    }
+    if (multiPartThreshold > Integer.MAX_VALUE) {
+      this.multiPartThreshold = Integer.MAX_VALUE;
+      LOG.warn("s3a: MIN_MULTIPART_THRESHOLD capped to ~2.14GB (maximum " +
+          "allowed size when using 'FAST_UPLOAD = true')");
+    } else {
+      this.multiPartThreshold = (int) multiPartThreshold;
+    }
+    this.bufferLimit = this.multiPartThreshold;
+    this.closed = false;
+    int initialBufferSize = this.fs.getConf()
+        .getInt(Constants.FAST_BUFFER_SIZE, Constants.DEFAULT_FAST_BUFFER_SIZE);
+    if (initialBufferSize < 0) {
+      LOG.warn("s3a: FAST_BUFFER_SIZE should be a positive number. Using " +
+          "default value");
+      initialBufferSize = Constants.DEFAULT_FAST_BUFFER_SIZE;
+    } else if (initialBufferSize > this.bufferLimit) {
+      LOG.warn("s3a: automatically adjusting FAST_BUFFER_SIZE to not " +
+          "exceed MIN_MULTIPART_THRESHOLD");
+      initialBufferSize = this.bufferLimit;
+    }
+    this.buffer = new ByteArrayOutputStream(initialBufferSize);
+    this.executorService = MoreExecutors.listeningDecorator(threadPoolExecutor);
+    this.multiPartUpload = null;
+    this.progressListener = new ProgressableListener(progress);
+    if (LOG.isDebugEnabled()){
+      LOG.debug("Initialized S3AFastOutputStream for bucket '{}' key '{}'",
+          bucket, key);
+    }
+  }
+
+  /**
+   * Writes a byte to the memory buffer. If this causes the buffer to reach
+   * its limit, the actual upload is submitted to the threadpool.
+   * @param b the int of which the lowest byte is written
+   * @throws IOException
+   */
+  @Override
+  public synchronized void write(int b) throws IOException {
+    buffer.write(b);
+    if (buffer.size() == bufferLimit) {
+      uploadBuffer();
+    }
+  }
+
+  /**
+   * Writes a range of bytes to the memory buffer. If this causes the
+   * buffer to reach its limit, the actual upload is submitted to the
+   * threadpool and the remainder of the array is written to memory
+   * (recursively).
+   * @param b byte array containing the bytes to be written
+   * @param off offset in array where to start
+   * @param len number of bytes to be written
+   * @throws IOException
+   */
+  @Override
+  public synchronized void write(byte b[], int off, int len)
+      throws IOException {
+    if (b == null) {
+      throw new NullPointerException();
+    } else if ((off < 0) || (off > b.length) || (len < 0) ||
+        ((off + len) > b.length) || ((off + len) < 0)) {
+      throw new IndexOutOfBoundsException();
+    } else if (len == 0) {
+      return;
+    }
+    if (buffer.size() + len < bufferLimit) {
+      buffer.write(b, off, len);
+    } else {
+      int firstPart = bufferLimit - buffer.size();
+      buffer.write(b, off, firstPart);
+      uploadBuffer();
+      this.write(b, off + firstPart, len - firstPart);
+    }
+  }
+
+  private synchronized void uploadBuffer() throws IOException {
+    if (multiPartUpload == null) {
+      multiPartUpload = initiateMultiPartUpload();
+       /* Upload the existing buffer if it exceeds partSize. This possibly
+       requires multiple parts! */
+      final byte[] allBytes = buffer.toByteArray();
+      buffer = null; //earlier gc?
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Total length of initial buffer: {}", allBytes.length);
+      }
+      int processedPos = 0;
+      while ((multiPartThreshold - processedPos) >= partSize) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Initial buffer: processing from byte {} to byte {}",
+              processedPos, (processedPos + partSize - 1));
+        }
+        multiPartUpload.uploadPartAsync(new ByteArrayInputStream(allBytes,
+            processedPos, partSize), partSize);
+        processedPos += partSize;
+      }
+      //resize and reset stream
+      bufferLimit = partSize;
+      buffer = new ByteArrayOutputStream(bufferLimit);
+      buffer.write(allBytes, processedPos, multiPartThreshold - processedPos);
+    } else {
+      //upload next part
+      multiPartUpload.uploadPartAsync(new ByteArrayInputStream(buffer
+          .toByteArray()), partSize);
+      buffer.reset();
+    }
+  }
+
+
+  @Override
+  public synchronized void close() throws IOException {
+    if (closed) {
+      return;
+    }
+    closed = true;
+    try {
+      if (multiPartUpload == null) {
+        putObject();
+      } else {
+        if (buffer.size() > 0) {
+          //send last part
+          multiPartUpload.uploadPartAsync(new ByteArrayInputStream(buffer
+              .toByteArray()), buffer.size());
+        }
+        final List<PartETag> partETags = multiPartUpload
+            .waitForAllPartUploads();
+        multiPartUpload.complete(partETags);
+      }
+      statistics.incrementWriteOps(1);
+      // This will delete unnecessary fake parent directories
+      fs.finishedWrite(key);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Upload complete for bucket '{}' key '{}'", bucket, key);
+      }
+    } finally {
+      buffer = null;
+      super.close();
+    }
+  }
+
+  private ObjectMetadata createDefaultMetadata() {
+    ObjectMetadata om = new ObjectMetadata();
+    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
+      om.setServerSideEncryption(serverSideEncryptionAlgorithm);
+    }
+    return om;
+  }
+
+  private MultiPartUpload initiateMultiPartUpload() throws IOException {
+    final ObjectMetadata om = createDefaultMetadata();
+    final InitiateMultipartUploadRequest initiateMPURequest =
+        new InitiateMultipartUploadRequest(bucket, key, om);
+    initiateMPURequest.setCannedACL(cannedACL);
+    try {
+      return new MultiPartUpload(
+          client.initiateMultipartUpload(initiateMPURequest).getUploadId());
+    } catch (AmazonServiceException ase) {
+      throw new IOException("Unable to initiate MultiPartUpload (server side)" +
+          ": " + ase, ase);
+    } catch (AmazonClientException ace) {
+      throw new IOException("Unable to initiate MultiPartUpload (client side)" +
+          ": " + ace, ace);
+    }
+  }
+
+  private void putObject() throws IOException {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Executing regular upload for bucket '{}' key '{}'", bucket,
+          key);
+    }
+    final ObjectMetadata om = createDefaultMetadata();
+    om.setContentLength(buffer.size());
+    final PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key,
+        new ByteArrayInputStream(buffer.toByteArray()), om);
+    putObjectRequest.setCannedAcl(cannedACL);
+    putObjectRequest.setGeneralProgressListener(progressListener);
+    ListenableFuture<PutObjectResult> putObjectResult =
+        executorService.submit(new Callable<PutObjectResult>() {
+          @Override
+          public PutObjectResult call() throws Exception {
+            return client.putObject(putObjectRequest);
+          }
+        });
+    //wait for completion
+    try {
+      putObjectResult.get();
+    } catch (InterruptedException ie) {
+      LOG.warn("Interrupted object upload:" + ie, ie);
+      Thread.currentThread().interrupt();
+    } catch (ExecutionException ee) {
+      throw new IOException("Regular upload failed", ee.getCause());
+    }
+  }
+
+  private class MultiPartUpload {
+    private final String uploadId;
+    private final List<ListenableFuture<PartETag>> partETagsFutures;
+
+    public MultiPartUpload(String uploadId) {
+      this.uploadId = uploadId;
+      this.partETagsFutures = new ArrayList<ListenableFuture<PartETag>>();
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Initiated multi-part upload for bucket '{}' key '{}' with " +
+            "id '{}'", bucket, key, uploadId);
+      }
+    }
+
+    public void uploadPartAsync(ByteArrayInputStream inputStream,
+        int partSize) {
+      final int currentPartNumber = partETagsFutures.size() + 1;
+      final UploadPartRequest request =
+          new UploadPartRequest().withBucketName(bucket).withKey(key)
+              .withUploadId(uploadId).withInputStream(inputStream)
+              .withPartNumber(currentPartNumber).withPartSize(partSize);
+      request.setGeneralProgressListener(progressListener);
+      ListenableFuture<PartETag> partETagFuture =
+          executorService.submit(new Callable<PartETag>() {
+            @Override
+            public PartETag call() throws Exception {
+              if (LOG.isDebugEnabled()) {
+                LOG.debug("Uploading part {} for id '{}'", currentPartNumber,
+                    uploadId);
+              }
+              return client.uploadPart(request).getPartETag();
+            }
+          });
+      partETagsFutures.add(partETagFuture);
+    }
+
+    public List<PartETag> waitForAllPartUploads() throws IOException {
+      try {
+        return Futures.allAsList(partETagsFutures).get();
+      } catch (InterruptedException ie) {
+        LOG.warn("Interrupted partUpload:" + ie, ie);
+        Thread.currentThread().interrupt();
+      } catch (ExecutionException ee) {
+        //there is no way of recovering so abort
+        //cancel all partUploads
+        for (ListenableFuture<PartETag> future : partETagsFutures) {
+          future.cancel(true);
+        }
+        //abort multipartupload
+        this.abort();
+        throw new IOException("Part upload failed in multi-part upload with " +
+            "id '" +uploadId + "':" + ee, ee);
+      }
+      //should not happen?
+      return null;
+    }
+
+    public void complete(List<PartETag> partETags) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Completing multi-part upload for key '{}', id '{}'", key,
+            uploadId);
+      }
+      final CompleteMultipartUploadRequest completeRequest =
+          new CompleteMultipartUploadRequest(bucket, key, uploadId, partETags);
+      client.completeMultipartUpload(completeRequest);
+
+    }
+
+    public void abort() {
+      LOG.warn("Aborting multi-part upload with id '{}'", uploadId);
+      try {
+        client.abortMultipartUpload(new AbortMultipartUploadRequest(bucket,
+            key, uploadId));
+      } catch (Exception e2) {
+        LOG.warn("Unable to abort multipart upload, you may need to purge  " +
+            "uploaded parts: " + e2, e2);
+      }
+    }
+  }
+
+  private static class ProgressableListener implements ProgressListener {
+    private final Progressable progress;
+
+    public ProgressableListener(Progressable progress) {
+      this.progress = progress;
+    }
+
+    public void progressChanged(ProgressEvent progressEvent) {
+      if (progress != null) {
+        progress.progress();
+      }
+    }
+  }
+}
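
The first uploadBuffer() call above may be holding as many as
multiPartThreshold bytes, so it can emit several partSize-sized parts at
once before shrinking the buffer. A standalone sketch of that split, with
names mirroring the fields above and prints standing in for
uploadPartAsync() (at this point allBytes.length == multiPartThreshold):

    static void splitInitialBuffer(byte[] allBytes, int partSize,
        int multiPartThreshold) {
      int processedPos = 0;
      while ((multiPartThreshold - processedPos) >= partSize) {
        // real code: uploadPartAsync(new ByteArrayInputStream(allBytes,
        //     processedPos, partSize), partSize)
        System.out.printf("part: bytes %d..%d%n",
            processedPos, processedPos + partSize - 1);
        processedPos += partSize;
      }
      // remainder is carried into the next, now partSize-limited, buffer
      System.out.printf("carry %d bytes into the next %d-byte buffer%n",
          multiPartThreshold - processedPos, partSize);
    }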

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24478c0a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 2373e7e..1a30d6f 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -88,7 +88,8 @@ public class S3AFileSystem extends FileSystem {
   private int maxKeys;
   private long partSize;
   private TransferManager transfers;
-  private int partSizeThreshold;
+  private ThreadPoolExecutor threadPoolExecutor;
+  private int multiPartThreshold;
   public static final Logger LOG = LoggerFactory.getLogger(S3AFileSystem.class);
   private CannedAccessControlList cannedACL;
   private String serverSideEncryptionAlgorithm;
@@ -237,7 +238,7 @@ public class S3AFileSystem extends FileSystem {
 
     maxKeys = conf.getInt(MAX_PAGING_KEYS, DEFAULT_MAX_PAGING_KEYS);
     partSize = conf.getLong(MULTIPART_SIZE, DEFAULT_MULTIPART_SIZE);
-    partSizeThreshold = conf.getInt(MIN_MULTIPART_THRESHOLD, 
+    multiPartThreshold = conf.getInt(MIN_MULTIPART_THRESHOLD,
       DEFAULT_MIN_MULTIPART_THRESHOLD);
 
     if (partSize < 5 * 1024 * 1024) {
@@ -245,9 +246,9 @@ public class S3AFileSystem extends FileSystem {
       partSize = 5 * 1024 * 1024;
     }
 
-    if (partSizeThreshold < 5 * 1024 * 1024) {
+    if (multiPartThreshold < 5 * 1024 * 1024) {
       LOG.error(MIN_MULTIPART_THRESHOLD + " must be at least 5 MB");
-      partSizeThreshold = 5 * 1024 * 1024;
+      multiPartThreshold = 5 * 1024 * 1024;
     }
 
     int maxThreads = conf.getInt(MAX_THREADS, DEFAULT_MAX_THREADS);
@@ -262,20 +263,20 @@ public class S3AFileSystem extends FileSystem {
     LinkedBlockingQueue<Runnable> workQueue =
       new LinkedBlockingQueue<>(maxThreads *
         conf.getInt(MAX_TOTAL_TASKS, DEFAULT_MAX_TOTAL_TASKS));
-    ThreadPoolExecutor tpe = new ThreadPoolExecutor(
+    threadPoolExecutor = new ThreadPoolExecutor(
         coreThreads,
         maxThreads,
         keepAliveTime,
         TimeUnit.SECONDS,
         workQueue,
         newDaemonThreadFactory("s3a-transfer-shared-"));
-    tpe.allowCoreThreadTimeOut(true);
+    threadPoolExecutor.allowCoreThreadTimeOut(true);
 
     TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
     transferConfiguration.setMinimumUploadPartSize(partSize);
-    transferConfiguration.setMultipartUploadThreshold(partSizeThreshold);
+    transferConfiguration.setMultipartUploadThreshold(multiPartThreshold);
 
-    transfers = new TransferManager(s3, tpe);
+    transfers = new TransferManager(s3, threadPoolExecutor);
     transfers.setConfiguration(transferConfiguration);
 
     String cannedACLName = conf.get(CANNED_ACL, DEFAULT_CANNED_ACL);
@@ -391,7 +392,12 @@ public class S3AFileSystem extends FileSystem {
     if (!overwrite && exists(f)) {
       throw new FileAlreadyExistsException(f + " already exists");
     }
-
+    if (getConf().getBoolean(FAST_UPLOAD, DEFAULT_FAST_UPLOAD)) {
+      return new FSDataOutputStream(new S3AFastOutputStream(s3, this, bucket,
+          key, progress, statistics, cannedACL,
+          serverSideEncryptionAlgorithm, partSize, (long)multiPartThreshold,
+          threadPoolExecutor), statistics);
+    }
     // We pass null to FSDataOutputStream so it won't count writes that are being buffered to a file
     return new FSDataOutputStream(new S3AOutputStream(getConf(), transfers, this,
       bucket, key, progress, cannedACL, statistics, 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24478c0a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index 8e80b92..bf62634 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -213,13 +213,13 @@ If you do any of these: change your credentials immediately!
     <property>
       <name>fs.s3a.connection.establish.timeout</name>
       <value>5000</value>
-      <description>Socket connection setup timeout in seconds.</description>
+      <description>Socket connection setup timeout in milliseconds.</description>
     </property>
 
     <property>
       <name>fs.s3a.connection.timeout</name>
       <value>50000</value>
-      <description>Socket connection timeout in seconds.</description>
+      <description>Socket connection timeout in milliseconds.</description>
     </property>
 
     <property>
@@ -292,7 +292,7 @@ If you do any of these: change your credentials immediately!
       <name>fs.s3a.buffer.dir</name>
       <value>${hadoop.tmp.dir}/s3a</value>
       <description>Comma separated list of directories that will be used to buffer file
-        uploads to.</description>
+        uploads to. No effect if fs.s3a.fast.upload is true.</description>
     </property>
 
     <property>
@@ -301,6 +301,40 @@ If you do any of these: change your credentials immediately!
       <description>The implementation class of the S3A Filesystem</description>
     </property>
 
+### S3AFastOutputStream
+ **Warning: NEW in hadoop 2.7. UNSTABLE, EXPERIMENTAL: use at own risk**
+
+    <property>
+      <name>fs.s3a.fast.upload</name>
+      <value>false</value>
+      <description>Upload directly from memory instead of buffering to
+      disk first. Memory usage and parallelism can be controlled: up to
+      fs.s3a.multipart.size bytes are consumed for each (part)upload that is
+      actively uploading (fs.s3a.threads.max) or queued (fs.s3a.max.total.tasks).</description>
+    </property>
+
+    <property>
+      <name>fs.s3a.fast.buffer.size</name>
+      <value>1048576</value>
+      <description>Size (in bytes) of initial memory buffer allocated for an
+      upload. No effect if fs.s3a.fast.upload is false.</description>
+    </property>
+
+Writes are buffered in memory instead of to a file on local disk. This
+removes the throughput bottleneck of the local disk write and read cycle
+before starting the actual upload. Furthermore, it allows handling files that
+are larger than the remaining local disk space.
+
+However, non-trivial memory tuning is needed for optimal results and careless
+settings could cause memory overflow. Up to `fs.s3a.threads.max` parallel
+(part)uploads are active. Furthermore, up to `fs.s3a.max.total.tasks`
+additional (part)uploads can be waiting (and thus memory buffers are created).
+The memory buffer is uploaded as a single upload if it is not larger than
+`fs.s3a.multipart.threshold`. Otherwise, a multi-part upload is initiated and
+parts of size `fs.s3a.multipart.size` are used to protect against overflowing
+the available memory. These settings should be tuned to the envisioned
+workflow (some large files, many small ones, ...) and the physical
+limitations of the machine and cluster (memory, network bandwidth).
 
 ## Testing the S3 filesystem clients
 
@@ -334,7 +368,7 @@ each filesystem for its testing.
 The contents of each bucket will be destroyed during the test process:
 do not use the bucket for any purpose other than testing. Furthermore, for
 s3a, all in-progress multi-part uploads to the bucket will be aborted at the
-start of a test (by forcing fs.s3a.multipart.purge=true) to clean up the
+start of a test (by forcing `fs.s3a.multipart.purge=true`) to clean up the
 temporary state of previously failed tests.
 
 Example:
@@ -392,14 +426,14 @@ Example:
 ## File `contract-test-options.xml`
 
 The file `hadoop-tools/hadoop-aws/src/test/resources/contract-test-options.xml`
-must be created and configured for the test fileystems.
+must be created and configured for the test filesystems.
 
 If a specific file `fs.contract.test.fs.*` test path is not defined for
 any of the filesystems, those tests will be skipped.
 
 The standard S3 authentication details must also be provided. This can be
 through copy-and-paste of the `auth-keys.xml` credentials, or it can be
-through direct XInclude inclustion.
+through direct XInclude inclusion.
 
 #### s3://
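
Taken together, the keys documented above bound worst-case buffered memory
at roughly (fs.s3a.threads.max + fs.s3a.max.total.tasks) *
fs.s3a.multipart.size. A usage sketch of the fast path through the standard
FileSystem API (a fragment; the bucket name, path and sizes are
placeholders):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    Configuration conf = new Configuration();
    conf.setBoolean("fs.s3a.fast.upload", true);        // enable memory path
    conf.setInt("fs.s3a.fast.buffer.size", 1048576);    // 1 MB initial buffer
    conf.setInt("fs.s3a.multipart.size", 5 * 1024 * 1024);      // 5 MB parts
    conf.setInt("fs.s3a.multipart.threshold", 5 * 1024 * 1024); // multi-part above 5 MB

    FileSystem fs = FileSystem.get(URI.create("s3a://example-bucket/"), conf);
    byte[] payload = new byte[8 * 1024 * 1024];   // 8 MB triggers multi-part
    try (FSDataOutputStream out = fs.create(new Path("/tests3a/blob"))) {
      out.write(payload);   // parts are uploaded eagerly as the buffer fills
    }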
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24478c0a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AFastOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AFastOutputStream.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AFastOutputStream.java
new file mode 100644
index 0000000..e507cf6
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AFastOutputStream.java
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+
+import java.io.IOException;
+
+/**
+ * Tests regular and multi-part upload functionality for S3AFastOutputStream.
+ * File sizes are kept small to reduce test duration on slow connections
+ */
+public class TestS3AFastOutputStream {
+  private FileSystem fs;
+
+
+  @Rule
+  public Timeout testTimeout = new Timeout(30 * 60 * 1000);
+
+  @Before
+  public void setUp() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setLong(Constants.MIN_MULTIPART_THRESHOLD, 5 * 1024 * 1024);
+    conf.setInt(Constants.MULTIPART_SIZE, 5 * 1024 * 1024);
+    conf.setBoolean(Constants.FAST_UPLOAD, true);
+    fs = S3ATestUtils.createTestFileSystem(conf);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    if (fs != null) {
+      fs.delete(getTestPath(), true);
+    }
+  }
+
+  protected Path getTestPath() {
+    return new Path("/tests3a");
+  }
+
+  @Test
+  public void testRegularUpload() throws IOException {
+    ContractTestUtils.createAndVerifyFile(fs, getTestPath(), 1024 * 1024);
+  }
+
+  @Test
+  public void testMultiPartUpload() throws IOException {
+    ContractTestUtils.createAndVerifyFile(fs, getTestPath(), 6 * 1024 *
+        1024);
+  }
+}


[32/50] [abbrv] hadoop git commit: HDFS-7855. Separate class Packet from DFSOutputStream. Contributed by Li Bo.

Posted by ji...@apache.org.
HDFS-7855. Separate class Packet from DFSOutputStream. Contributed by Li Bo.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d8bb732f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d8bb732f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d8bb732f

Branch: refs/heads/HDFS-7285
Commit: d8bb732fa7ddd4f4a55aead7160a8b6290b9446d
Parents: 7a638ed
Author: Jing Zhao <ji...@apache.org>
Authored: Thu Mar 5 10:57:48 2015 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:25 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   2 +
 .../org/apache/hadoop/hdfs/DFSOutputStream.java | 238 +++-------------
 .../java/org/apache/hadoop/hdfs/DFSPacket.java  | 270 +++++++++++++++++++
 .../org/apache/hadoop/hdfs/TestDFSPacket.java   |  68 +++++
 4 files changed, 381 insertions(+), 197 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8bb732f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 59f69fb..763d327 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -715,6 +715,8 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11648. Set DomainSocketWatcher thread name explicitly.
     (Liang Xie via ozawa)
 
+    HDFS-7855. Separate class Packet from DFSOutputStream. (Li Bo via jing9)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8bb732f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
old mode 100644
new mode 100755
index dc2f674..130bb6e
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -30,7 +30,6 @@ import java.io.OutputStream;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.Socket;
-import java.nio.BufferOverflowException;
 import java.nio.channels.ClosedChannelException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -79,7 +78,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseP
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
@@ -160,9 +158,9 @@ public class DFSOutputStream extends FSOutputSummer
   private final int bytesPerChecksum; 
 
   // both dataQueue and ackQueue are protected by dataQueue lock
-  private final LinkedList<Packet> dataQueue = new LinkedList<Packet>();
-  private final LinkedList<Packet> ackQueue = new LinkedList<Packet>();
-  private Packet currentPacket = null;
+  private final LinkedList<DFSPacket> dataQueue = new LinkedList<DFSPacket>();
+  private final LinkedList<DFSPacket> ackQueue = new LinkedList<DFSPacket>();
+  private DFSPacket currentPacket = null;
   private DataStreamer streamer;
   private long currentSeqno = 0;
   private long lastQueuedSeqno = -1;
@@ -187,8 +185,8 @@ public class DFSOutputStream extends FSOutputSummer
       BlockStoragePolicySuite.createDefaultSuite();
 
   /** Use {@link ByteArrayManager} to create buffer for non-heartbeat packets.*/
-  private Packet createPacket(int packetSize, int chunksPerPkt, long offsetInBlock,
-      long seqno) throws InterruptedIOException {
+  private DFSPacket createPacket(int packetSize, int chunksPerPkt, long offsetInBlock,
+      long seqno, boolean lastPacketInBlock) throws InterruptedIOException {
     final byte[] buf;
     final int bufferSize = PacketHeader.PKT_MAX_HEADER_LEN + packetSize;
 
@@ -201,171 +199,20 @@ public class DFSOutputStream extends FSOutputSummer
       throw iioe;
     }
 
-    return new Packet(buf, chunksPerPkt, offsetInBlock, seqno, getChecksumSize());
+    return new DFSPacket(buf, chunksPerPkt, offsetInBlock, seqno,
+                         getChecksumSize(), lastPacketInBlock);
   }
 
   /**
    * For heartbeat packets, create buffer directly by new byte[]
    * since heartbeats should not be blocked.
    */
-  private Packet createHeartbeatPacket() throws InterruptedIOException {
+  private DFSPacket createHeartbeatPacket() throws InterruptedIOException {
     final byte[] buf = new byte[PacketHeader.PKT_MAX_HEADER_LEN];
-    return new Packet(buf, 0, 0, Packet.HEART_BEAT_SEQNO, getChecksumSize());
+    return new DFSPacket(buf, 0, 0, DFSPacket.HEART_BEAT_SEQNO,
+                         getChecksumSize(), false);
   }
 
-  private static class Packet {
-    private static final long HEART_BEAT_SEQNO = -1L;
-    final long seqno; // sequencenumber of buffer in block
-    final long offsetInBlock; // offset in block
-    boolean syncBlock; // this packet forces the current block to disk
-    int numChunks; // number of chunks currently in packet
-    final int maxChunks; // max chunks in packet
-    private byte[] buf;
-    private boolean lastPacketInBlock; // is this the last packet in block?
-
-    /**
-     * buf is pointed into like follows:
-     *  (C is checksum data, D is payload data)
-     *
-     * [_________CCCCCCCCC________________DDDDDDDDDDDDDDDD___]
-     *           ^        ^               ^               ^
-     *           |        checksumPos     dataStart       dataPos
-     *           checksumStart
-     * 
-     * Right before sending, we move the checksum data to immediately precede
-     * the actual data, and then insert the header into the buffer immediately
-     * preceding the checksum data, so we make sure to keep enough space in
-     * front of the checksum data to support the largest conceivable header. 
-     */
-    int checksumStart;
-    int checksumPos;
-    final int dataStart;
-    int dataPos;
-
-    /**
-     * Create a new packet.
-     * 
-     * @param chunksPerPkt maximum number of chunks per packet.
-     * @param offsetInBlock offset in bytes into the HDFS block.
-     */
-    private Packet(byte[] buf, int chunksPerPkt, long offsetInBlock, long seqno,
-        int checksumSize) {
-      this.lastPacketInBlock = false;
-      this.numChunks = 0;
-      this.offsetInBlock = offsetInBlock;
-      this.seqno = seqno;
-
-      this.buf = buf;
-
-      checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;
-      checksumPos = checksumStart;
-      dataStart = checksumStart + (chunksPerPkt * checksumSize);
-      dataPos = dataStart;
-      maxChunks = chunksPerPkt;
-    }
-
-    synchronized void writeData(byte[] inarray, int off, int len)
-        throws ClosedChannelException {
-      checkBuffer();
-      if (dataPos + len > buf.length) {
-        throw new BufferOverflowException();
-      }
-      System.arraycopy(inarray, off, buf, dataPos, len);
-      dataPos += len;
-    }
-
-    synchronized void writeChecksum(byte[] inarray, int off, int len)
-        throws ClosedChannelException {
-      checkBuffer();
-      if (len == 0) {
-        return;
-      }
-      if (checksumPos + len > dataStart) {
-        throw new BufferOverflowException();
-      }
-      System.arraycopy(inarray, off, buf, checksumPos, len);
-      checksumPos += len;
-    }
-    
-    /**
-     * Write the full packet, including the header, to the given output stream.
-     */
-    synchronized void writeTo(DataOutputStream stm) throws IOException {
-      checkBuffer();
-
-      final int dataLen = dataPos - dataStart;
-      final int checksumLen = checksumPos - checksumStart;
-      final int pktLen = HdfsConstants.BYTES_IN_INTEGER + dataLen + checksumLen;
-
-      PacketHeader header = new PacketHeader(
-        pktLen, offsetInBlock, seqno, lastPacketInBlock, dataLen, syncBlock);
-      
-      if (checksumPos != dataStart) {
-        // Move the checksum to cover the gap. This can happen for the last
-        // packet or during an hflush/hsync call.
-        System.arraycopy(buf, checksumStart, buf, 
-                         dataStart - checksumLen , checksumLen); 
-        checksumPos = dataStart;
-        checksumStart = checksumPos - checksumLen;
-      }
-      
-      final int headerStart = checksumStart - header.getSerializedSize();
-      assert checksumStart + 1 >= header.getSerializedSize();
-      assert checksumPos == dataStart;
-      assert headerStart >= 0;
-      assert headerStart + header.getSerializedSize() == checksumStart;
-      
-      // Copy the header data into the buffer immediately preceding the checksum
-      // data.
-      System.arraycopy(header.getBytes(), 0, buf, headerStart,
-          header.getSerializedSize());
-      
-      // corrupt the data for testing.
-      if (DFSClientFaultInjector.get().corruptPacket()) {
-        buf[headerStart+header.getSerializedSize() + checksumLen + dataLen-1] ^= 0xff;
-      }
-
-      // Write the now contiguous full packet to the output stream.
-      stm.write(buf, headerStart, header.getSerializedSize() + checksumLen + dataLen);
-
-      // undo corruption.
-      if (DFSClientFaultInjector.get().uncorruptPacket()) {
-        buf[headerStart+header.getSerializedSize() + checksumLen + dataLen-1] ^= 0xff;
-      }
-    }
-
-    private synchronized void checkBuffer() throws ClosedChannelException {
-      if (buf == null) {
-        throw new ClosedChannelException();
-      }
-    }
-
-    private synchronized void releaseBuffer(ByteArrayManager bam) {
-      bam.release(buf);
-      buf = null;
-    }
-
-    // get the packet's last byte's offset in the block
-    synchronized long getLastByteOffsetBlock() {
-      return offsetInBlock + dataPos - dataStart;
-    }
-
-    /**
-     * Check if this packet is a heart beat packet
-     * @return true if the sequence number is HEART_BEAT_SEQNO
-     */
-    private boolean isHeartbeatPacket() {
-      return seqno == HEART_BEAT_SEQNO;
-    }
-    
-    @Override
-    public String toString() {
-      return "packet seqno: " + this.seqno +
-      " offsetInBlock: " + this.offsetInBlock +
-      " lastPacketInBlock: " + this.lastPacketInBlock +
-      " lastByteOffsetInBlock: " + this.getLastByteOffsetBlock();
-    }
-  }
 
   //
   // The DataStreamer class is responsible for sending data packets to the
@@ -556,7 +403,7 @@ public class DFSOutputStream extends FSOutputSummer
           }
         }
 
-        Packet one;
+        DFSPacket one;
         try {
           // process datanode IO errors if any
           boolean doSleep = false;
@@ -620,7 +467,7 @@ public class DFSOutputStream extends FSOutputSummer
                 " Aborting file " + src);
           }
 
-          if (one.lastPacketInBlock) {
+          if (one.isLastPacketInBlock()) {
             // wait for all data packets have been successfully acked
             synchronized (dataQueue) {
               while (!streamerClosed && !hasError && 
@@ -681,7 +528,7 @@ public class DFSOutputStream extends FSOutputSummer
           }
 
           // Is this block full?
-          if (one.lastPacketInBlock) {
+          if (one.isLastPacketInBlock()) {
             // wait for the close packet has been acked
             synchronized (dataQueue) {
               while (!streamerClosed && !hasError && 
@@ -883,7 +730,7 @@ public class DFSOutputStream extends FSOutputSummer
             ack.readFields(blockReplyStream);
             long duration = Time.monotonicNow() - begin;
             if (duration > dfsclientSlowLogThresholdMs
-                && ack.getSeqno() != Packet.HEART_BEAT_SEQNO) {
+                && ack.getSeqno() != DFSPacket.HEART_BEAT_SEQNO) {
               DFSClient.LOG
                   .warn("Slow ReadProcessor read fields took " + duration
                       + "ms (threshold=" + dfsclientSlowLogThresholdMs + "ms); ack: "
@@ -920,21 +767,21 @@ public class DFSOutputStream extends FSOutputSummer
             
             assert seqno != PipelineAck.UNKOWN_SEQNO : 
               "Ack for unknown seqno should be a failed ack: " + ack;
-            if (seqno == Packet.HEART_BEAT_SEQNO) {  // a heartbeat ack
+            if (seqno == DFSPacket.HEART_BEAT_SEQNO) {  // a heartbeat ack
               continue;
             }
 
             // a success ack for a data packet
-            Packet one;
+            DFSPacket one;
             synchronized (dataQueue) {
               one = ackQueue.getFirst();
             }
-            if (one.seqno != seqno) {
+            if (one.getSeqno() != seqno) {
               throw new IOException("ResponseProcessor: Expecting seqno " +
                                     " for block " + block +
-                                    one.seqno + " but received " + seqno);
+                                    one.getSeqno() + " but received " + seqno);
             }
-            isLastPacketInBlock = one.lastPacketInBlock;
+            isLastPacketInBlock = one.isLastPacketInBlock();
 
             // Fail the packet write for testing in order to force a
             // pipeline recovery.
@@ -1032,10 +879,10 @@ public class DFSOutputStream extends FSOutputSummer
           // We also need to set lastAckedSeqno to the end-of-block Packet's seqno, so that
           // a client waiting on close() will be aware that the flush finished.
           synchronized (dataQueue) {
-            Packet endOfBlockPacket = dataQueue.remove();  // remove the end of block packet
-            assert endOfBlockPacket.lastPacketInBlock;
-            assert lastAckedSeqno == endOfBlockPacket.seqno - 1;
-            lastAckedSeqno = endOfBlockPacket.seqno;
+            DFSPacket endOfBlockPacket = dataQueue.remove();  // remove the end of block packet
+            assert endOfBlockPacket.isLastPacketInBlock();
+            assert lastAckedSeqno == endOfBlockPacket.getSeqno() - 1;
+            lastAckedSeqno = endOfBlockPacket.getSeqno();
             dataQueue.notifyAll();
           }
           endBlock();
@@ -1862,9 +1709,9 @@ public class DFSOutputStream extends FSOutputSummer
     synchronized (dataQueue) {
       if (currentPacket == null) return;
       dataQueue.addLast(currentPacket);
-      lastQueuedSeqno = currentPacket.seqno;
+      lastQueuedSeqno = currentPacket.getSeqno();
       if (DFSClient.LOG.isDebugEnabled()) {
-        DFSClient.LOG.debug("Queued packet " + currentPacket.seqno);
+        DFSClient.LOG.debug("Queued packet " + currentPacket.getSeqno());
       }
       currentPacket = null;
       dataQueue.notifyAll();
@@ -1916,10 +1763,10 @@ public class DFSOutputStream extends FSOutputSummer
 
     if (currentPacket == null) {
       currentPacket = createPacket(packetSize, chunksPerPacket, 
-          bytesCurBlock, currentSeqno++);
+          bytesCurBlock, currentSeqno++, false);
       if (DFSClient.LOG.isDebugEnabled()) {
         DFSClient.LOG.debug("DFSClient writeChunk allocating new packet seqno=" + 
-            currentPacket.seqno +
+            currentPacket.getSeqno() +
             ", src=" + src +
             ", packetSize=" + packetSize +
             ", chunksPerPacket=" + chunksPerPacket +
@@ -1929,16 +1776,16 @@ public class DFSOutputStream extends FSOutputSummer
 
     currentPacket.writeChecksum(checksum, ckoff, cklen);
     currentPacket.writeData(b, offset, len);
-    currentPacket.numChunks++;
+    currentPacket.incNumChunks();
     bytesCurBlock += len;
 
     // If packet is full, enqueue it for transmission
     //
-    if (currentPacket.numChunks == currentPacket.maxChunks ||
+    if (currentPacket.getNumChunks() == currentPacket.getMaxChunks() ||
         bytesCurBlock == blockSize) {
       if (DFSClient.LOG.isDebugEnabled()) {
         DFSClient.LOG.debug("DFSClient writeChunk packet full seqno=" +
-            currentPacket.seqno +
+            currentPacket.getSeqno() +
             ", src=" + src +
             ", bytesCurBlock=" + bytesCurBlock +
             ", blockSize=" + blockSize +
@@ -1963,9 +1810,8 @@ public class DFSOutputStream extends FSOutputSummer
       // indicate the end of block and reset bytesCurBlock.
       //
       if (bytesCurBlock == blockSize) {
-        currentPacket = createPacket(0, 0, bytesCurBlock, currentSeqno++);
-        currentPacket.lastPacketInBlock = true;
-        currentPacket.syncBlock = shouldSyncBlock;
+        currentPacket = createPacket(0, 0, bytesCurBlock, currentSeqno++, true);
+        currentPacket.setSyncBlock(shouldSyncBlock);
         waitAndQueueCurrentPacket();
         bytesCurBlock = 0;
         lastFlushOffset = 0;
@@ -2053,7 +1899,7 @@ public class DFSOutputStream extends FSOutputSummer
             // but sync was requested.
             // Send an empty packet if we do not end the block right now
             currentPacket = createPacket(packetSize, chunksPerPacket,
-                bytesCurBlock, currentSeqno++);
+                bytesCurBlock, currentSeqno++, false);
           }
         } else {
           if (isSync && bytesCurBlock > 0 && !endBlock) {
@@ -2062,7 +1908,7 @@ public class DFSOutputStream extends FSOutputSummer
             // and sync was requested.
             // So send an empty sync packet if we do not end the block right now
             currentPacket = createPacket(packetSize, chunksPerPacket,
-                bytesCurBlock, currentSeqno++);
+                bytesCurBlock, currentSeqno++, false);
           } else if (currentPacket != null) {
             // just discard the current packet since it is already been sent.
             currentPacket.releaseBuffer(byteArrayManager);
@@ -2070,15 +1916,14 @@ public class DFSOutputStream extends FSOutputSummer
           }
         }
         if (currentPacket != null) {
-          currentPacket.syncBlock = isSync;
+          currentPacket.setSyncBlock(isSync);
           waitAndQueueCurrentPacket();          
         }
         if (endBlock && bytesCurBlock > 0) {
           // Need to end the current block, thus send an empty packet to
           // indicate this is the end of the block and reset bytesCurBlock
-          currentPacket = createPacket(0, 0, bytesCurBlock, currentSeqno++);
-          currentPacket.lastPacketInBlock = true;
-          currentPacket.syncBlock = shouldSyncBlock || isSync;
+          currentPacket = createPacket(0, 0, bytesCurBlock, currentSeqno++, true);
+          currentPacket.setSyncBlock(shouldSyncBlock || isSync);
           waitAndQueueCurrentPacket();
           bytesCurBlock = 0;
           lastFlushOffset = 0;
@@ -2249,8 +2094,8 @@ public class DFSOutputStream extends FSOutputSummer
     }
   }
   
-  private static void releaseBuffer(List<Packet> packets, ByteArrayManager bam) {
-    for(Packet p : packets) {
+  private static void releaseBuffer(List<DFSPacket> packets, ByteArrayManager bam) {
+    for (DFSPacket p : packets) {
       p.releaseBuffer(bam);
     }
     packets.clear();
@@ -2297,9 +2142,8 @@ public class DFSOutputStream extends FSOutputSummer
 
       if (bytesCurBlock != 0) {
         // send an empty packet to mark the end of the block
-        currentPacket = createPacket(0, 0, bytesCurBlock, currentSeqno++);
-        currentPacket.lastPacketInBlock = true;
-        currentPacket.syncBlock = shouldSyncBlock;
+        currentPacket = createPacket(0, 0, bytesCurBlock, currentSeqno++, true);
+        currentPacket.setSyncBlock(shouldSyncBlock);
       }
 
       flushInternal();             // flush all data to Datanodes
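
The DFSPacket class extracted below keeps header room, checksums and
payload in a single byte[]; a minimal sketch of the offset bookkeeping its
javadoc describes (a fragment; PacketHeader, chunksPerPkt and checksumSize
are as in the constructor below):

    // [ header gap | CCCC...CCCC | gap | DDDD...DDDD ]
    //               ^checksumStart      ^dataStart
    int checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;  // room for header
    int checksumPos   = checksumStart;     // advances with writeChecksum()
    int dataStart     = checksumStart + chunksPerPkt * checksumSize;
    int dataPos       = dataStart;         // advances with writeData()

    // Just before sending, writeTo() slides the checksums right so they
    // touch the data, then serializes the header immediately before them:
    //   headerStart = checksumStart - header.getSerializedSize()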

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8bb732f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
new file mode 100755
index 0000000..9b3ea51
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
@@ -0,0 +1,270 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.BufferOverflowException;
+import java.nio.channels.ClosedChannelException;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
+import org.apache.hadoop.hdfs.util.ByteArrayManager;
+
+/****************************************************************
+ * DFSPacket is used by DataStreamer and DFSOutputStream.
+ * DFSOutputStream generates packets and then asks DataStreamer
+ * to send them to datanodes.
+ ****************************************************************/
+
+class DFSPacket {
+  public static final long HEART_BEAT_SEQNO = -1L;
+  private final long seqno; // sequence number of buffer in block
+  private final long offsetInBlock; // offset in block
+  private boolean syncBlock; // this packet forces the current block to disk
+  private int numChunks; // number of chunks currently in packet
+  private final int maxChunks; // max chunks in packet
+  private byte[] buf;
+  private final boolean lastPacketInBlock; // is this the last packet in block?
+
+  /**
+   * buf is laid out as follows:
+   *  (C is checksum data, D is payload data)
+   *
+   * [_________CCCCCCCCC________________DDDDDDDDDDDDDDDD___]
+   *           ^        ^               ^               ^
+   *           |        checksumPos     dataStart       dataPos
+   *           checksumStart
+   *
+   * Right before sending, we move the checksum data to immediately precede
+   * the actual data, and then insert the header into the buffer immediately
+   * preceding the checksum data, so we make sure to keep enough space in
+   * front of the checksum data to support the largest conceivable header.
+   */
+  private int checksumStart;
+  private int checksumPos;
+  private final int dataStart;
+  private int dataPos;
+
+  /**
+   * Create a new packet.
+   *
+   * @param buf the buffer storing data and checksums
+   * @param chunksPerPkt maximum number of chunks per packet.
+   * @param offsetInBlock offset in bytes into the HDFS block.
+   * @param seqno the sequence number of this packet
+   * @param checksumSize the size of checksum
+   * @param lastPacketInBlock if this is the last packet
+   */
+  DFSPacket(byte[] buf, int chunksPerPkt, long offsetInBlock, long seqno,
+                   int checksumSize, boolean lastPacketInBlock) {
+    this.lastPacketInBlock = lastPacketInBlock;
+    this.numChunks = 0;
+    this.offsetInBlock = offsetInBlock;
+    this.seqno = seqno;
+
+    this.buf = buf;
+
+    checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;
+    checksumPos = checksumStart;
+    dataStart = checksumStart + (chunksPerPkt * checksumSize);
+    dataPos = dataStart;
+    maxChunks = chunksPerPkt;
+  }
+
+  /**
+   * Write data to this packet.
+   *
+   * @param inarray input array of data
+   * @param off the offset of data to write
+   * @param len the length of data to write
+   * @throws ClosedChannelException
+   */
+  synchronized void writeData(byte[] inarray, int off, int len)
+      throws ClosedChannelException {
+    checkBuffer();
+    if (dataPos + len > buf.length) {
+      throw new BufferOverflowException();
+    }
+    System.arraycopy(inarray, off, buf, dataPos, len);
+    dataPos += len;
+  }
+
+  /**
+   * Write checksums to this packet
+   *
+   * @param inarray input array of checksums
+   * @param off the offset of checksums to write
+   * @param len the length of checksums to write
+   * @throws ClosedChannelException
+   */
+  synchronized void writeChecksum(byte[] inarray, int off, int len)
+      throws ClosedChannelException {
+    checkBuffer();
+    if (len == 0) {
+      return;
+    }
+    if (checksumPos + len > dataStart) {
+      throw new BufferOverflowException();
+    }
+    System.arraycopy(inarray, off, buf, checksumPos, len);
+    checksumPos += len;
+  }
+
+  /**
+   * Write the full packet, including the header, to the given output stream.
+   *
+   * @param stm the output stream to write the packet to
+   * @throws IOException
+   */
+  synchronized void writeTo(DataOutputStream stm) throws IOException {
+    checkBuffer();
+
+    final int dataLen = dataPos - dataStart;
+    final int checksumLen = checksumPos - checksumStart;
+    final int pktLen = HdfsConstants.BYTES_IN_INTEGER + dataLen + checksumLen;
+
+    PacketHeader header = new PacketHeader(
+        pktLen, offsetInBlock, seqno, lastPacketInBlock, dataLen, syncBlock);
+
+    if (checksumPos != dataStart) {
+      // Move the checksum to cover the gap. This can happen for the last
+      // packet or during an hflush/hsync call.
+      System.arraycopy(buf, checksumStart, buf,
+          dataStart - checksumLen , checksumLen);
+      checksumPos = dataStart;
+      checksumStart = checksumPos - checksumLen;
+    }
+
+    final int headerStart = checksumStart - header.getSerializedSize();
+    assert checksumStart + 1 >= header.getSerializedSize();
+    assert headerStart >= 0;
+    assert headerStart + header.getSerializedSize() == checksumStart;
+
+    // Copy the header data into the buffer immediately preceding the checksum
+    // data.
+    System.arraycopy(header.getBytes(), 0, buf, headerStart,
+        header.getSerializedSize());
+
+    // corrupt the data for testing.
+    if (DFSClientFaultInjector.get().corruptPacket()) {
+      buf[headerStart+header.getSerializedSize() + checksumLen + dataLen-1] ^= 0xff;
+    }
+
+    // Write the now contiguous full packet to the output stream.
+    stm.write(buf, headerStart, header.getSerializedSize() + checksumLen + dataLen);
+
+    // undo corruption.
+    if (DFSClientFaultInjector.get().uncorruptPacket()) {
+      buf[headerStart+header.getSerializedSize() + checksumLen + dataLen-1] ^= 0xff;
+    }
+  }
+
+  private synchronized void checkBuffer() throws ClosedChannelException {
+    if (buf == null) {
+      throw new ClosedChannelException();
+    }
+  }
+
+  /**
+   * Release the buffer in this packet to ByteArrayManager.
+   *
+   * @param bam the ByteArrayManager to return the buffer to
+   */
+  synchronized void releaseBuffer(ByteArrayManager bam) {
+    bam.release(buf);
+    buf = null;
+  }
+
+  /**
+   * get the packet's last byte's offset in the block
+   *
+   * @return the packet's last byte's offset in the block
+   */
+  synchronized long getLastByteOffsetBlock() {
+    return offsetInBlock + dataPos - dataStart;
+  }
+
+  /**
+   * Check if this packet is a heartbeat packet
+   *
+   * @return true if the sequence number is HEART_BEAT_SEQNO
+   */
+  boolean isHeartbeatPacket() {
+    return seqno == HEART_BEAT_SEQNO;
+  }
+
+  /**
+   * check if this packet is the last packet in block
+   *
+   * @return true if the packet is the last packet
+   */
+  boolean isLastPacketInBlock(){
+    return lastPacketInBlock;
+  }
+
+  /**
+   * get sequence number of this packet
+   *
+   * @return the sequence number of this packet
+   */
+  long getSeqno(){
+    return seqno;
+  }
+
+  /**
+   * get the number of chunks this packet contains
+   *
+   * @return the number of chunks in this packet
+   */
+  synchronized int getNumChunks(){
+    return numChunks;
+  }
+
+  /**
+   * increase the number of chunks by one
+   */
+  synchronized void incNumChunks(){
+    numChunks++;
+  }
+
+  /**
+   * get the maximum number of chunks this packet can hold
+   *
+   * @return the maximum number of chunks
+   */
+  int getMaxChunks(){
+    return maxChunks;
+  }
+
+  /**
+   * Set whether this packet should sync the current block to disk
+   *
+   * @param syncBlock whether to sync the block
+   */
+  synchronized void setSyncBlock(boolean syncBlock){
+    this.syncBlock = syncBlock;
+  }
+
+  @Override
+  public String toString() {
+    return "packet seqno: " + this.seqno +
+        " offsetInBlock: " + this.offsetInBlock +
+        " lastPacketInBlock: " + this.lastPacketInBlock +
+        " lastByteOffsetInBlock: " + this.getLastByteOffsetBlock();
+  }
+}
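
As a quick illustration of the new class, the sketch below builds a header-only
heartbeat packet the way DFSOutputStream's heartbeat path does. This is a
minimal sketch, not part of the patch: it assumes code living in the same
org.apache.hadoop.hdfs package (DFSPacket is package-private), a surrounding
method that throws IOException, and an illustrative 4-byte (CRC32) checksum
size.

    // Minimal sketch; assumes same-package access and a throws-IOException context.
    // import java.io.ByteArrayOutputStream;
    // import java.io.DataOutputStream;
    // import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
    byte[] buf = new byte[PacketHeader.PKT_MAX_HEADER_LEN]; // room for the header only
    DFSPacket heartbeat = new DFSPacket(buf, 0, 0,
        DFSPacket.HEART_BEAT_SEQNO, 4 /* illustrative checksum size */, false);
    assert heartbeat.isHeartbeatPacket();
    DataOutputStream out = new DataOutputStream(new ByteArrayOutputStream());
    heartbeat.writeTo(out); // header-only packet: dataLen and checksumLen are 0

Because chunksPerPkt is 0, checksumStart equals dataStart, so writeTo()
serializes nothing but the PacketHeader; this is how the pipeline is kept
alive between real data packets.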

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8bb732f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPacket.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPacket.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPacket.java
new file mode 100755
index 0000000..8bf6097
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPacket.java
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.util.Random;
+import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestDFSPacket {
+  private static final int chunkSize = 512;
+  private static final int checksumSize = 4;
+  private static final int maxChunksPerPacket = 4;
+
+  @Test
+  public void testPacket() throws Exception {
+    Random r = new Random(12345L);
+    byte[] data =  new byte[chunkSize];
+    r.nextBytes(data);
+    byte[] checksum = new byte[checksumSize];
+    r.nextBytes(checksum);
+
+    DataOutputBuffer os =  new DataOutputBuffer(data.length * 2);
+
+    byte[] packetBuf = new byte[data.length * 2];
+    DFSPacket p = new DFSPacket(packetBuf, maxChunksPerPacket,
+                                0, 0, checksumSize, false);
+    p.setSyncBlock(true);
+    p.writeData(data, 0, data.length);
+    p.writeChecksum(checksum, 0, checksum.length);
+    p.writeTo(os);
+
+    // we have set syncBlock to true, so the header has the maximum length
+    int headerLen = PacketHeader.PKT_MAX_HEADER_LEN;
+    byte[] readBuf = os.getData();
+
+    assertArrayRegionsEqual(readBuf, headerLen, checksum, 0, checksum.length);
+    assertArrayRegionsEqual(readBuf, headerLen + checksum.length, data, 0, data.length);
+
+  }
+
+  public static void assertArrayRegionsEqual(byte []buf1, int off1, byte []buf2,
+                                             int off2, int len) {
+    for (int i = 0; i < len; i++) {
+      if (buf1[off1 + i] != buf2[off2 + i]) {
+        Assert.fail("arrays differ at byte " + i + ". " +
+            "The first array has " + (int) buf1[off1 + i] +
+            ", but the second array has " + (int) buf2[off2 + i]);
+      }
+    }
+  }
+}


[08/50] [abbrv] hadoop git commit: HDFS-7535. Utilize Snapshot diff report for distcp. Contributed by Jing Zhao.

Posted by ji...@apache.org.
HDFS-7535. Utilize Snapshot diff report for distcp. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39535ec7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39535ec7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39535ec7

Branch: refs/heads/HDFS-7285
Commit: 39535ec788aee029e31b42ae666ecf516215d10c
Parents: bf3604b
Author: Jing Zhao <ji...@apache.org>
Authored: Wed Mar 4 10:30:53 2015 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:23 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   2 +
 .../org/apache/hadoop/tools/CopyListing.java    |   4 +-
 .../java/org/apache/hadoop/tools/DiffInfo.java  |  90 +++++
 .../java/org/apache/hadoop/tools/DistCp.java    |  16 +-
 .../apache/hadoop/tools/DistCpConstants.java    |   3 +
 .../apache/hadoop/tools/DistCpOptionSwitch.java |  12 +-
 .../org/apache/hadoop/tools/DistCpOptions.java  |  34 ++
 .../org/apache/hadoop/tools/DistCpSync.java     | 192 ++++++++++
 .../org/apache/hadoop/tools/OptionsParser.java  |  24 +-
 .../hadoop/tools/mapred/CopyCommitter.java      |   3 +-
 .../org/apache/hadoop/tools/TestDistCpSync.java | 349 +++++++++++++++++++
 .../apache/hadoop/tools/TestOptionsParser.java  |  75 +++-
 12 files changed, 790 insertions(+), 14 deletions(-)
----------------------------------------------------------------------
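
On the command line the new path is reached with
"distcp -update -delete -diff <fromSnapshot> <toSnapshot> <source> <target>".
Programmatically, the flow mirrors the DistCp change below. The following is a
minimal sketch under stated assumptions: it presumes code in the
org.apache.hadoop.tools package (DistCpSync is package-private), a
throws-IOException context, and hypothetical /source and /target paths with
snapshot names s1/s2.

    // Minimal sketch; paths and snapshot names are illustrative.
    // import java.util.Arrays;
    // import org.apache.hadoop.conf.Configuration;
    // import org.apache.hadoop.fs.Path;
    Configuration conf = new Configuration();
    DistCpOptions options = new DistCpOptions(
        Arrays.asList(new Path("/source")), new Path("/target"));
    options.setSyncFolder(true);           // corresponds to -update
    options.setDeleteMissing(true);        // corresponds to -delete
    options.setUseDiff(true, "s1", "s2");  // corresponds to -diff s1 s2
    if (!DistCpSync.sync(options, conf)) {
      options.disableUsingDiff();          // fall back to a full copy listing
    }

If sync() returns false (non-HDFS file systems, missing snapshots, or changes
on the target since s1), distcp degrades to the ordinary update/delete
behavior, as the fallback tests below verify.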


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39535ec7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 62006d3..3c6d447 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -704,6 +704,8 @@ Release 2.7.0 - UNRELEASED
     HDFS-7789. DFSck should resolve the path to support cross-FS symlinks.
     (gera)
 
+    HDFS-7535. Utilize Snapshot diff report for distcp. (jing9)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39535ec7/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
index a7b68a9..e3c58e9 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
@@ -224,7 +224,9 @@ public abstract class CopyListing extends Configured {
                                            Credentials credentials,
                                            DistCpOptions options)
       throws IOException {
-
+    if (options.shouldUseDiff()) {
+      return new GlobbedCopyListing(configuration, credentials);
+    }
     String copyListingClassName = configuration.get(DistCpConstants.
         CONF_LABEL_COPY_LISTING_CLASS, "");
     Class<? extends CopyListing> copyListingClass;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39535ec7/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
new file mode 100644
index 0000000..b617de7
--- /dev/null
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DiffInfo.java
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.tools;
+
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.List;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+
+/**
+ * Information representing a rename/delete op derived from a snapshot diff entry.
+ * This includes the source file/dir of the rename/delete op, and the target
+ * file/dir of a rename op.
+ */
+class DiffInfo {
+  static final Comparator<DiffInfo> sourceComparator = new Comparator<DiffInfo>() {
+    @Override
+    public int compare(DiffInfo d1, DiffInfo d2) {
+      return d2.source.compareTo(d1.source);
+    }
+  };
+
+  static final Comparator<DiffInfo> targetComparator = new Comparator<DiffInfo>() {
+    @Override
+    public int compare(DiffInfo d1, DiffInfo d2) {
+      return d1.target == null ? -1 :
+          (d2.target ==  null ? 1 : d1.target.compareTo(d2.target));
+    }
+  };
+
+  /** The source file/dir of the rename or deletion op */
+  final Path source;
+  /**
+   * The intermediate file/dir for the op. For a rename or a delete op,
+   * we first rename the source to this tmp file/dir.
+   */
+  private Path tmp;
+  /** The target file/dir of the rename op. Null means the op is deletion. */
+  final Path target;
+
+  DiffInfo(Path source, Path target) {
+    assert source != null;
+    this.source = source;
+    this.target = target;
+  }
+
+  void setTmp(Path tmp) {
+    this.tmp = tmp;
+  }
+
+  Path getTmp() {
+    return tmp;
+  }
+
+  static DiffInfo[] getDiffs(SnapshotDiffReport report, Path targetDir) {
+    List<DiffInfo> diffs = new ArrayList<>();
+    for (SnapshotDiffReport.DiffReportEntry entry : report.getDiffList()) {
+      if (entry.getType() == SnapshotDiffReport.DiffType.DELETE) {
+        final Path source = new Path(targetDir,
+            DFSUtil.bytes2String(entry.getSourcePath()));
+        diffs.add(new DiffInfo(source, null));
+      } else if (entry.getType() == SnapshotDiffReport.DiffType.RENAME) {
+        final Path source = new Path(targetDir,
+            DFSUtil.bytes2String(entry.getSourcePath()));
+        final Path target = new Path(targetDir,
+            DFSUtil.bytes2String(entry.getTargetPath()));
+        diffs.add(new DiffInfo(source, target));
+      }
+    }
+    return diffs.toArray(new DiffInfo[diffs.size()]);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39535ec7/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
index b80aeb8..ada4b25 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
@@ -18,6 +18,9 @@
 
 package org.apache.hadoop.tools;
 
+import java.io.IOException;
+import java.util.Random;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -27,10 +30,10 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.Cluster;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.JobSubmissionFiles;
-import org.apache.hadoop.mapreduce.Cluster;
 import org.apache.hadoop.tools.CopyListing.*;
 import org.apache.hadoop.tools.mapred.CopyMapper;
 import org.apache.hadoop.tools.mapred.CopyOutputFormat;
@@ -39,9 +42,6 @@ import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
-import java.io.IOException;
-import java.util.Random;
-
 import com.google.common.annotations.VisibleForTesting;
 
 /**
@@ -62,7 +62,7 @@ public class DistCp extends Configured implements Tool {
    */
   static final int SHUTDOWN_HOOK_PRIORITY = 30;
 
-  private static final Log LOG = LogFactory.getLog(DistCp.class);
+  static final Log LOG = LogFactory.getLog(DistCp.class);
 
   private DistCpOptions inputOptions;
   private Path metaFolder;
@@ -171,9 +171,13 @@ public class DistCp extends Configured implements Tool {
         //Don't cleanup while we are setting up.
         metaFolder = createMetaFolderPath();
         jobFS = metaFolder.getFileSystem(getConf());
-
         job = createJob();
       }
+      if (inputOptions.shouldUseDiff()) {
+        if (!DistCpSync.sync(inputOptions, getConf())) {
+          inputOptions.disableUsingDiff();
+        }
+      }
       createInputFileListing(job);
 
       job.submit();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39535ec7/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
index 7e71096..a1af2af 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
@@ -53,6 +53,7 @@ public class DistCpConstants {
   public static final String CONF_LABEL_SKIP_CRC = "distcp.skip.crc";
   public static final String CONF_LABEL_OVERWRITE = "distcp.copy.overwrite";
   public static final String CONF_LABEL_APPEND = "distcp.copy.append";
+  public static final String CONF_LABEL_DIFF = "distcp.copy.diff";
   public static final String CONF_LABEL_BANDWIDTH_MB = "distcp.map.bandwidth.mb";
   
   public static final String CONF_LABEL_MAX_CHUNKS_TOLERABLE =
@@ -134,4 +135,6 @@ public class DistCpConstants {
    * Value of reserved raw HDFS directory when copying raw.* xattrs.
    */
   static final String HDFS_RESERVED_RAW_DIRECTORY_NAME = "/.reserved/raw";
+
+  static final String HDFS_DISTCP_DIFF_DIRECTORY_NAME = ".distcp.diff.tmp";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39535ec7/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
index 159d5ca..e9c7d46 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
@@ -41,8 +41,6 @@ public enum DistCpOptionSwitch {
    * target file. Note that when preserving checksum type, block size is also 
    * preserved.
    *
-   * @see PRESERVE_STATUS_DEFAULT
-   *
    * If any of the optional switches are present among rbugpcaxt, then
    * only the corresponding file attribute is preserved.
    */
@@ -149,6 +147,11 @@ public enum DistCpOptionSwitch {
       new Option("append", false,
           "Reuse existing data in target files and append new data to them if possible")),
 
+  DIFF(DistCpConstants.CONF_LABEL_DIFF,
+      new Option("diff", false,
+      "Use snapshot diff report to identify the difference between source and target"),
+      2),
+
   /**
    * Should DisctpExecution be blocking
    */
@@ -178,6 +181,11 @@ public enum DistCpOptionSwitch {
     this.option = option;
   }
 
+  DistCpOptionSwitch(String confLabel, Option option, int argNum) {
+    this(confLabel, option);
+    this.option.setArgs(argNum);
+  }
+
   /**
    * Get Configuration label for the option
    * @return configuration label name

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39535ec7/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
index 57d2fb7..709e583 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
@@ -42,6 +42,7 @@ public class DistCpOptions {
   private boolean append = false;
   private boolean skipCRC = false;
   private boolean blocking = true;
+  private boolean useDiff = false;
 
   private int maxMaps = DistCpConstants.DEFAULT_MAPS;
   private int mapBandwidth = DistCpConstants.DEFAULT_BANDWIDTH_MB;
@@ -61,6 +62,9 @@ public class DistCpOptions {
   private Path sourceFileListing;
   private List<Path> sourcePaths;
 
+  private String fromSnapshot;
+  private String toSnapshot;
+
   private Path targetPath;
 
   // targetPathExist is a derived field, it's initialized in the 
@@ -264,6 +268,29 @@ public class DistCpOptions {
     this.append = append;
   }
 
+  public boolean shouldUseDiff() {
+    return this.useDiff;
+  }
+
+  public String getFromSnapshot() {
+    return this.fromSnapshot;
+  }
+
+  public String getToSnapshot() {
+    return this.toSnapshot;
+  }
+
+  public void setUseDiff(boolean useDiff, String fromSnapshot, String toSnapshot) {
+    validate(DistCpOptionSwitch.DIFF, useDiff);
+    this.useDiff = useDiff;
+    this.fromSnapshot = fromSnapshot;
+    this.toSnapshot = toSnapshot;
+  }
+
+  public void disableUsingDiff() {
+    this.useDiff = false;
+  }
+
   /**
    * Should CRC/checksum check be skipped while checking files are identical
    *
@@ -508,6 +535,7 @@ public class DistCpOptions {
     boolean skipCRC = (option == DistCpOptionSwitch.SKIP_CRC ?
         value : this.skipCRC);
     boolean append = (option == DistCpOptionSwitch.APPEND ? value : this.append);
+    boolean useDiff = (option == DistCpOptionSwitch.DIFF ? value : this.useDiff);
 
     if (syncFolder && atomicCommit) {
       throw new IllegalArgumentException("Atomic commit can't be used with " +
@@ -536,6 +564,10 @@ public class DistCpOptions {
       throw new IllegalArgumentException(
           "Append is disallowed when skipping CRC");
     }
+    if ((!syncFolder || !deleteMissing) && useDiff) {
+      throw new IllegalArgumentException(
+          "Diff is valid only with update and delete options");
+    }
   }
 
   /**
@@ -556,6 +588,8 @@ public class DistCpOptions {
         String.valueOf(overwrite));
     DistCpOptionSwitch.addToConf(conf, DistCpOptionSwitch.APPEND,
         String.valueOf(append));
+    DistCpOptionSwitch.addToConf(conf, DistCpOptionSwitch.DIFF,
+        String.valueOf(useDiff));
     DistCpOptionSwitch.addToConf(conf, DistCpOptionSwitch.SKIP_CRC,
         String.valueOf(skipCRC));
     DistCpOptionSwitch.addToConf(conf, DistCpOptionSwitch.BANDWIDTH,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39535ec7/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
new file mode 100644
index 0000000..26d7eb4
--- /dev/null
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
@@ -0,0 +1,192 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.tools;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Random;
+
+/**
+ * This class provides the basic functionality to sync two FileSystems based on
+ * the snapshot diff report. More specifically, the following preconditions must hold:
+ * 1. Both the source and target FileSystem must be DistributedFileSystem
+ * 2. Two snapshots (e.g., s1 and s2) have been created on the source FS.
+ * The diff between these two snapshots will be copied to the target FS.
+ * 3. The target has the same snapshot s1. No changes have been made on the
+ * target since s1. All the files/directories in the target are the same as
+ * those in source.s1
+ */
+class DistCpSync {
+
+  static boolean sync(DistCpOptions inputOptions, Configuration conf)
+      throws IOException {
+    List<Path> sourcePaths = inputOptions.getSourcePaths();
+    if (sourcePaths.size() != 1) {
+      // we only support one source dir which must be a snapshottable directory
+      DistCp.LOG.warn(sourcePaths.size() + " source paths are provided");
+      return false;
+    }
+    final Path sourceDir = sourcePaths.get(0);
+    final Path targetDir = inputOptions.getTargetPath();
+
+    final FileSystem sfs = sourceDir.getFileSystem(conf);
+    final FileSystem tfs = targetDir.getFileSystem(conf);
+    // currently we require both the source and the target file system are
+    // DistributedFileSystem.
+    if (!(sfs instanceof DistributedFileSystem) ||
+        !(tfs instanceof DistributedFileSystem)) {
+      DistCp.LOG.warn("To use diff-based distcp, the FileSystems needs to" +
+          " be DistributedFileSystem");
+      return false;
+    }
+    final DistributedFileSystem sourceFs = (DistributedFileSystem) sfs;
+    final DistributedFileSystem targetFs = (DistributedFileSystem) tfs;
+
+    // make sure targetFs has not changed between the from-snapshot and the current state
+    if (!checkNoChange(inputOptions, targetFs, targetDir)) {
+      return false;
+    }
+
+    Path tmpDir = null;
+    try {
+      tmpDir = createTargetTmpDir(targetFs, targetDir);
+      DiffInfo[] diffs = getDiffs(inputOptions, sourceFs, sourceDir, targetDir);
+      if (diffs == null) {
+        return false;
+      }
+      // do the real sync work: deletion and rename
+      syncDiff(diffs, targetFs, tmpDir);
+      return true;
+    } catch (Exception e) {
+      DistCp.LOG.warn("Failed to use snapshot diff for distcp", e);
+      return false;
+    } finally {
+      deleteTargetTmpDir(targetFs, tmpDir);
+      // TODO: since we have tmp directory, we can support "undo" with failures
+    }
+  }
+
+  private static Path createTargetTmpDir(DistributedFileSystem targetFs,
+      Path targetDir) throws IOException {
+    final Path tmp = new Path(targetDir,
+        DistCpConstants.HDFS_DISTCP_DIFF_DIRECTORY_NAME + DistCp.rand.nextInt());
+    if (!targetFs.mkdirs(tmp)) {
+      throw new IOException("The tmp directory " + tmp + " already exists");
+    }
+    return tmp;
+  }
+
+  private static void deleteTargetTmpDir(DistributedFileSystem targetFs,
+      Path tmpDir) {
+    try {
+      if (tmpDir != null) {
+        targetFs.delete(tmpDir, true);
+      }
+    } catch (IOException e) {
+      DistCp.LOG.error("Unable to cleanup tmp dir: " + tmpDir, e);
+    }
+  }
+
+  /**
+   * Compute the snapshot diff on the given file system. Return true if the diff
+   * is empty, i.e., no changes have happened in the FS.
+   */
+  private static boolean checkNoChange(DistCpOptions inputOptions,
+      DistributedFileSystem fs, Path path) {
+    try {
+      SnapshotDiffReport targetDiff =
+          fs.getSnapshotDiffReport(path, inputOptions.getFromSnapshot(), "");
+      if (!targetDiff.getDiffList().isEmpty()) {
+        DistCp.LOG.warn("The target has been modified since snapshot "
+            + inputOptions.getFromSnapshot());
+        return false;
+      } else {
+        return true;
+      }
+    } catch (IOException e) {
+      DistCp.LOG.warn("Failed to compute snapshot diff on " + path, e);
+    }
+    return false;
+  }
+
+  @VisibleForTesting
+  static DiffInfo[] getDiffs(DistCpOptions inputOptions,
+      DistributedFileSystem fs, Path sourceDir, Path targetDir) {
+    try {
+      SnapshotDiffReport sourceDiff = fs.getSnapshotDiffReport(sourceDir,
+          inputOptions.getFromSnapshot(), inputOptions.getToSnapshot());
+      return DiffInfo.getDiffs(sourceDiff, targetDir);
+    } catch (IOException e) {
+      DistCp.LOG.warn("Failed to compute snapshot diff on " + sourceDir, e);
+    }
+    return null;
+  }
+
+  private static void syncDiff(DiffInfo[] diffs,
+      DistributedFileSystem targetFs, Path tmpDir) throws IOException {
+    moveToTmpDir(diffs, targetFs, tmpDir);
+    moveToTarget(diffs, targetFs);
+  }
+
+  /**
+   * Move all the source files that should be renamed or deleted to the tmp
+   * directory.
+   */
+  private static void moveToTmpDir(DiffInfo[] diffs,
+      DistributedFileSystem targetFs, Path tmpDir) throws IOException {
+    // sort the diffs based on their source paths to make sure the files and
+    // subdirs are moved before moving their parents/ancestors.
+    Arrays.sort(diffs, DiffInfo.sourceComparator);
+    Random random = new Random();
+    for (DiffInfo diff : diffs) {
+      Path tmpTarget = new Path(tmpDir, diff.source.getName());
+      while (targetFs.exists(tmpTarget)) {
+        tmpTarget = new Path(tmpDir, diff.source.getName() + random.nextInt());
+      }
+      diff.setTmp(tmpTarget);
+      targetFs.rename(diff.source, tmpTarget);
+    }
+  }
+
+  /**
+   * Finish the rename operations: move all the intermediate files/directories
+   * from the tmp dir to the final targets.
+   */
+  private static void moveToTarget(DiffInfo[] diffs,
+      DistributedFileSystem targetFs) throws IOException {
+    // sort the diffs based on their target paths to make sure the parent
+    // directories are created first.
+    Arrays.sort(diffs, DiffInfo.targetComparator);
+    for (DiffInfo diff : diffs) {
+      if (diff.target != null) {
+        if (!targetFs.exists(diff.target.getParent())) {
+          targetFs.mkdirs(diff.target.getParent());
+        }
+        targetFs.rename(diff.getTmp(), diff.target);
+      }
+    }
+  }
+}
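
To see why the two sort orders matter, consider a toy diff in which
/target/foo/d1 is renamed under /target/bar and a file beneath it is deleted.
The sketch below is illustrative only: the paths are hypothetical and it
assumes same-package access, since DiffInfo and its comparators are
package-private.

    // Toy illustration of the two orderings used by moveToTmpDir/moveToTarget.
    DiffInfo[] diffs = new DiffInfo[] {
        new DiffInfo(new Path("/target/foo"), new Path("/target/bar/d1/foo")),
        new DiffInfo(new Path("/target/foo/d1"), new Path("/target/bar/d1")),
        new DiffInfo(new Path("/target/foo/d1/f3"), null)  // a deletion
    };
    Arrays.sort(diffs, DiffInfo.sourceComparator);
    // Descending source order: /target/foo/d1/f3, /target/foo/d1, /target/foo.
    // Children reach the tmp dir before their ancestors are moved away.
    Arrays.sort(diffs, DiffInfo.targetComparator);
    // Deletions (null target) sort first, then ascending target order:
    // /target/bar/d1 is recreated before /target/bar/d1/foo lands inside it.

Entries with a null target never leave the tmp directory; they are removed
along with it when deleteTargetTmpDir() runs at the end of sync().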

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39535ec7/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java
index 525136c..a3a76ef 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java
@@ -18,13 +18,22 @@
 
 package org.apache.hadoop.tools;
 
-import org.apache.commons.cli.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.tools.DistCpOptions.FileAttribute;
 
-import java.util.*;
+import com.google.common.base.Preconditions;
 
 /**
  * The OptionsParser parses out the command-line options passed to DistCp,
@@ -207,6 +216,13 @@ public class OptionsParser {
       }
     }
 
+    if (command.hasOption(DistCpOptionSwitch.DIFF.getSwitch())) {
+      String[] snapshots = getVals(command, DistCpOptionSwitch.DIFF.getSwitch());
+      Preconditions.checkArgument(snapshots != null && snapshots.length == 2,
+          "Must provide both the starting and ending snapshot names");
+      option.setUseDiff(true, snapshots[0], snapshots[1]);
+    }
+
     if (command.hasOption(DistCpOptionSwitch.FILE_LIMIT.getSwitch())) {
       String fileLimitString = getVal(command,
                               DistCpOptionSwitch.FILE_LIMIT.getSwitch().trim());
@@ -247,6 +263,10 @@ public class OptionsParser {
     }
   }
 
+  private static String[] getVals(CommandLine command, String option) {
+    return command.getOptionValues(option);
+  }
+
   public static void usage() {
     HelpFormatter formatter = new HelpFormatter();
     formatter.printHelp("distcp OPTIONS [source_path...] <target_path>\n\nOPTIONS", cliOptions);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39535ec7/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
index d5fdd7f..f36ef77 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
@@ -90,7 +90,8 @@ public class CopyCommitter extends FileOutputCommitter {
     }
 
     try {
-      if (conf.getBoolean(DistCpConstants.CONF_LABEL_DELETE_MISSING, false)) {
+      if (conf.getBoolean(DistCpConstants.CONF_LABEL_DELETE_MISSING, false)
+          && !(conf.getBoolean(DistCpConstants.CONF_LABEL_DIFF, false))) {
         deleteMissing(conf);
       } else if (conf.getBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY, false)) {
         commitData(conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39535ec7/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java
new file mode 100644
index 0000000..7d5dad0
--- /dev/null
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpSync.java
@@ -0,0 +1,349 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.tools;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.tools.mapred.CopyMapper;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
+public class TestDistCpSync {
+  private MiniDFSCluster cluster;
+  private final Configuration conf = new HdfsConfiguration();
+  private DistributedFileSystem dfs;
+  private DistCpOptions options;
+  private final Path source = new Path("/source");
+  private final Path target = new Path("/target");
+  private final long BLOCK_SIZE = 1024;
+  private final short DATA_NUM = 1;
+
+  @Before
+  public void setUp() throws Exception {
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATA_NUM).build();
+    cluster.waitActive();
+
+    dfs = cluster.getFileSystem();
+    dfs.mkdirs(source);
+    dfs.mkdirs(target);
+
+    options = new DistCpOptions(Arrays.asList(source), target);
+    options.setSyncFolder(true);
+    options.setDeleteMissing(true);
+    options.setUseDiff(true, "s1", "s2");
+    options.appendToConf(conf);
+
+    conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, target.toString());
+    conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, target.toString());
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    IOUtils.cleanup(null, dfs);
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * Test the sync returns false in the following scenarios:
+   * 1. the source/target dirs are not snapshottable
+   * 2. the source/target do not have the given snapshots
+   * 3. changes have been made in the target
+   */
+  @Test
+  public void testFallback() throws Exception {
+    // the source/target dirs are not snapshottable
+    Assert.assertFalse(DistCpSync.sync(options, conf));
+
+    // the source/target does not have the given snapshots
+    dfs.allowSnapshot(source);
+    dfs.allowSnapshot(target);
+    Assert.assertFalse(DistCpSync.sync(options, conf));
+
+    dfs.createSnapshot(source, "s1");
+    dfs.createSnapshot(source, "s2");
+    dfs.createSnapshot(target, "s1");
+    Assert.assertTrue(DistCpSync.sync(options, conf));
+
+    // changes have been made in target
+    final Path subTarget = new Path(target, "sub");
+    dfs.mkdirs(subTarget);
+    Assert.assertFalse(DistCpSync.sync(options, conf));
+
+    dfs.delete(subTarget, true);
+    Assert.assertTrue(DistCpSync.sync(options, conf));
+  }
+
+  /**
+   * create some files and directories under the given directory.
+   * the final subtree looks like this:
+   *                     dir/
+   *              foo/          bar/
+   *           d1/    f1     d2/    f2
+   *         f3            f4
+   */
+  private void initData(Path dir) throws Exception {
+    final Path foo = new Path(dir, "foo");
+    final Path bar = new Path(dir, "bar");
+    final Path d1 = new Path(foo, "d1");
+    final Path f1 = new Path(foo, "f1");
+    final Path d2 = new Path(bar, "d2");
+    final Path f2 = new Path(bar, "f2");
+    final Path f3 = new Path(d1, "f3");
+    final Path f4 = new Path(d2, "f4");
+
+    DFSTestUtil.createFile(dfs, f1, BLOCK_SIZE, DATA_NUM, 0);
+    DFSTestUtil.createFile(dfs, f2, BLOCK_SIZE, DATA_NUM, 0);
+    DFSTestUtil.createFile(dfs, f3, BLOCK_SIZE, DATA_NUM, 0);
+    DFSTestUtil.createFile(dfs, f4, BLOCK_SIZE, DATA_NUM, 0);
+  }
+
+  /**
+   * make some changes under the given directory (created in the above way).
+   * 1. rename dir/foo/d1 to dir/bar/d1
+   * 2. delete dir/bar/d1/f3
+   * 3. rename dir/foo to /dir/bar/d1/foo
+   * 4. delete dir/bar/d1/foo/f1
+   * 5. create file dir/bar/d1/foo/f1 whose size is 2*BLOCK_SIZE
+   * 6. append one BLOCK to file dir/bar/f2
+   * 7. rename dir/bar to dir/foo
+   *
+   * Thus after all these ops the subtree looks like this:
+   *                       dir/
+   *                       foo/
+   *                 d1/    f2(A)    d2/
+   *                foo/             f4
+   *                f1(new)
+   */
+  private void changeData(Path dir) throws Exception {
+    final Path foo = new Path(dir, "foo");
+    final Path bar = new Path(dir, "bar");
+    final Path d1 = new Path(foo, "d1");
+    final Path f2 = new Path(bar, "f2");
+
+    final Path bar_d1 = new Path(bar, "d1");
+    dfs.rename(d1, bar_d1);
+    final Path f3 = new Path(bar_d1, "f3");
+    dfs.delete(f3, true);
+    final Path newfoo = new Path(bar_d1, "foo");
+    dfs.rename(foo, newfoo);
+    final Path f1 = new Path(newfoo, "f1");
+    dfs.delete(f1, true);
+    DFSTestUtil.createFile(dfs, f1, 2 * BLOCK_SIZE, DATA_NUM, 0);
+    DFSTestUtil.appendFile(dfs, f2, (int) BLOCK_SIZE);
+    dfs.rename(bar, new Path(dir, "foo"));
+  }
+
+  /**
+   * Test the basic functionality.
+   */
+  @Test
+  public void testSync() throws Exception {
+    initData(source);
+    initData(target);
+    dfs.allowSnapshot(source);
+    dfs.allowSnapshot(target);
+    dfs.createSnapshot(source, "s1");
+    dfs.createSnapshot(target, "s1");
+
+    // make changes under source
+    changeData(source);
+    dfs.createSnapshot(source, "s2");
+
+    // do the sync
+    Assert.assertTrue(DistCpSync.sync(options, conf));
+
+    // build copy listing
+    final Path listingPath = new Path("/tmp/META/fileList.seq");
+    CopyListing listing = new GlobbedCopyListing(conf, new Credentials());
+    listing.buildListing(listingPath, options);
+
+    Map<Text, CopyListingFileStatus> copyListing = getListing(listingPath);
+    CopyMapper copyMapper = new CopyMapper();
+    StubContext stubContext = new StubContext(conf, null, 0);
+    Mapper<Text, CopyListingFileStatus, Text, Text>.Context context =
+        stubContext.getContext();
+    // Enable append
+    context.getConfiguration().setBoolean(
+        DistCpOptionSwitch.APPEND.getConfigLabel(), true);
+    copyMapper.setup(context);
+    for (Map.Entry<Text, CopyListingFileStatus> entry : copyListing.entrySet()) {
+      copyMapper.map(entry.getKey(), entry.getValue(), context);
+    }
+
+    // verify that we only copied the newly appended data of f2 and the new file f1
+    Assert.assertEquals(BLOCK_SIZE * 3, stubContext.getReporter()
+        .getCounter(CopyMapper.Counter.BYTESCOPIED).getValue());
+
+    // verify the source and target now have the same structure
+    verifyCopy(dfs.getFileStatus(source), dfs.getFileStatus(target), false);
+  }
+
+  private Map<Text, CopyListingFileStatus> getListing(Path listingPath)
+      throws Exception {
+    SequenceFile.Reader reader = new SequenceFile.Reader(conf,
+        SequenceFile.Reader.file(listingPath));
+    Text key = new Text();
+    CopyListingFileStatus value = new CopyListingFileStatus();
+    Map<Text, CopyListingFileStatus> values = new HashMap<>();
+    while (reader.next(key, value)) {
+      values.put(key, value);
+      key = new Text();
+      value = new CopyListingFileStatus();
+    }
+    return values;
+  }
+
+  private void verifyCopy(FileStatus s, FileStatus t, boolean compareName)
+      throws Exception {
+    Assert.assertEquals(s.isDirectory(), t.isDirectory());
+    if (compareName) {
+      Assert.assertEquals(s.getPath().getName(), t.getPath().getName());
+    }
+    if (!s.isDirectory()) {
+      // verify the file content is the same
+      byte[] sbytes = DFSTestUtil.readFileBuffer(dfs, s.getPath());
+      byte[] tbytes = DFSTestUtil.readFileBuffer(dfs, t.getPath());
+      Assert.assertArrayEquals(sbytes, tbytes);
+    } else {
+      FileStatus[] slist = dfs.listStatus(s.getPath());
+      FileStatus[] tlist = dfs.listStatus(t.getPath());
+      Assert.assertEquals(slist.length, tlist.length);
+      for (int i = 0; i < slist.length; i++) {
+        verifyCopy(slist[i], tlist[i], true);
+      }
+    }
+  }
+
+  private void initData2(Path dir) throws Exception {
+    final Path test = new Path(dir, "test");
+    final Path foo = new Path(dir, "foo");
+    final Path bar = new Path(dir, "bar");
+    final Path f1 = new Path(test, "f1");
+    final Path f2 = new Path(foo, "f2");
+    final Path f3 = new Path(bar, "f3");
+
+    DFSTestUtil.createFile(dfs, f1, BLOCK_SIZE, DATA_NUM, 0L);
+    DFSTestUtil.createFile(dfs, f2, BLOCK_SIZE, DATA_NUM, 1L);
+    DFSTestUtil.createFile(dfs, f3, BLOCK_SIZE, DATA_NUM, 2L);
+  }
+
+  private void changeData2(Path dir) throws Exception {
+    final Path tmpFoo = new Path(dir, "tmpFoo");
+    final Path test = new Path(dir, "test");
+    final Path foo = new Path(dir, "foo");
+    final Path bar = new Path(dir, "bar");
+
+    dfs.rename(test, tmpFoo);
+    dfs.rename(foo, test);
+    dfs.rename(bar, foo);
+    dfs.rename(tmpFoo, bar);
+  }
+
+  @Test
+  public void testSync2() throws Exception {
+    initData2(source);
+    initData2(target);
+    dfs.allowSnapshot(source);
+    dfs.allowSnapshot(target);
+    dfs.createSnapshot(source, "s1");
+    dfs.createSnapshot(target, "s1");
+
+    // make changes under source
+    changeData2(source);
+    dfs.createSnapshot(source, "s2");
+
+    SnapshotDiffReport report = dfs.getSnapshotDiffReport(source, "s1", "s2");
+    System.out.println(report);
+
+    // do the sync
+    Assert.assertTrue(DistCpSync.sync(options, conf));
+    verifyCopy(dfs.getFileStatus(source), dfs.getFileStatus(target), false);
+  }
+
+  private void initData3(Path dir) throws Exception {
+    final Path test = new Path(dir, "test");
+    final Path foo = new Path(dir, "foo");
+    final Path bar = new Path(dir, "bar");
+    final Path f1 = new Path(test, "file");
+    final Path f2 = new Path(foo, "file");
+    final Path f3 = new Path(bar, "file");
+
+    DFSTestUtil.createFile(dfs, f1, BLOCK_SIZE, DATA_NUM, 0L);
+    DFSTestUtil.createFile(dfs, f2, BLOCK_SIZE * 2, DATA_NUM, 1L);
+    DFSTestUtil.createFile(dfs, f3, BLOCK_SIZE * 3, DATA_NUM, 2L);
+  }
+
+  private void changeData3(Path dir) throws Exception {
+    final Path test = new Path(dir, "test");
+    final Path foo = new Path(dir, "foo");
+    final Path bar = new Path(dir, "bar");
+    final Path f1 = new Path(test, "file");
+    final Path f2 = new Path(foo, "file");
+    final Path f3 = new Path(bar, "file");
+    final Path newf1 = new Path(test, "newfile");
+    final Path newf2 = new Path(foo, "newfile");
+    final Path newf3 = new Path(bar, "newfile");
+
+    dfs.rename(f1, newf1);
+    dfs.rename(f2, newf2);
+    dfs.rename(f3, newf3);
+  }
+
+  /**
+   * Test a case where there are multiple source files with the same name
+   */
+  @Test
+  public void testSync3() throws Exception {
+    initData3(source);
+    initData3(target);
+    dfs.allowSnapshot(source);
+    dfs.allowSnapshot(target);
+    dfs.createSnapshot(source, "s1");
+    dfs.createSnapshot(target, "s1");
+
+    // make changes under source
+    changeData3(source);
+    dfs.createSnapshot(source, "s2");
+
+    SnapshotDiffReport report = dfs.getSnapshotDiffReport(source, "s1", "s2");
+    System.out.println(report);
+
+    // do the sync
+    Assert.assertTrue(DistCpSync.sync(options, conf));
+    verifyCopy(dfs.getFileStatus(source), dfs.getFileStatus(target), false);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39535ec7/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java
index 30fb25b..cc9da33 100644
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java
@@ -584,7 +584,7 @@ public class TestOptionsParser {
 
     // make sure -append is only valid when -update is specified
     try {
-      options = OptionsParser.parse(new String[] { "-append",
+      OptionsParser.parse(new String[] { "-append",
               "hdfs://localhost:8020/source/first",
               "hdfs://localhost:8020/target/" });
       fail("Append should fail if update option is not specified");
@@ -595,7 +595,7 @@ public class TestOptionsParser {
 
     // make sure -append is invalid when skipCrc is specified
     try {
-      options = OptionsParser.parse(new String[] {
+      OptionsParser.parse(new String[] {
           "-append", "-update", "-skipcrccheck",
           "hdfs://localhost:8020/source/first",
           "hdfs://localhost:8020/target/" });
@@ -605,4 +605,75 @@ public class TestOptionsParser {
           "Append is disallowed when skipping CRC", e);
     }
   }
+
+  @Test
+  public void testDiffOption() {
+    Configuration conf = new Configuration();
+    Assert.assertFalse(conf.getBoolean(DistCpOptionSwitch.DIFF.getConfigLabel(),
+        false));
+
+    DistCpOptions options = OptionsParser.parse(new String[] { "-update",
+        "-delete", "-diff", "s1", "s2",
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/" });
+    options.appendToConf(conf);
+    Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.DIFF.getConfigLabel(), false));
+    Assert.assertTrue(options.shouldUseDiff());
+    Assert.assertEquals("s1", options.getFromSnapshot());
+    Assert.assertEquals("s2", options.getToSnapshot());
+
+    options = OptionsParser.parse(new String[] {
+        "-delete", "-diff", "s1", ".", "-update",
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/" });
+    options.appendToConf(conf);
+    Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.DIFF.getConfigLabel(),
+        false));
+    Assert.assertTrue(options.shouldUseDiff());
+    Assert.assertEquals("s1", options.getFromSnapshot());
+    Assert.assertEquals(".", options.getToSnapshot());
+
+    // -diff requires two option values
+    try {
+      OptionsParser.parse(new String[] {"-diff", "s1", "-delete", "-update",
+          "hdfs://localhost:8020/source/first",
+          "hdfs://localhost:8020/target/" });
+      fail("-diff should fail with only one snapshot name");
+    } catch (IllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains(
+          "Must provide both the starting and ending snapshot names", e);
+    }
+
+    // make sure -diff is only valid when -update and -delete is specified
+    try {
+      OptionsParser.parse(new String[] { "-diff", "s1", "s2",
+          "hdfs://localhost:8020/source/first",
+          "hdfs://localhost:8020/target/" });
+      fail("-diff should fail if -update or -delete option is not specified");
+    } catch (IllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains(
+          "Diff is valid only with update and delete options", e);
+    }
+
+    try {
+      OptionsParser.parse(new String[] { "-diff", "s1", "s2", "-update",
+          "hdfs://localhost:8020/source/first",
+          "hdfs://localhost:8020/target/" });
+      fail("-diff should fail if -update or -delete option is not specified");
+    } catch (IllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains(
+          "Diff is valid only with update and delete options", e);
+    }
+
+    try {
+      OptionsParser.parse(new String[] { "-diff", "s1", "s2",
+          "-delete", "-overwrite",
+          "hdfs://localhost:8020/source/first",
+          "hdfs://localhost:8020/target/" });
+      fail("-diff should fail if -update or -delete option is not specified");
+    } catch (IllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains(
+          "Diff is valid only with update and delete options", e);
+    }
+  }
 }
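
A minimal sketch (not part of the patch; paths and snapshot names are illustrative) of how a caller drives the new -diff option through the OptionsParser API exercised above:

    DistCpOptions options = OptionsParser.parse(new String[] {
        "-update", "-delete", "-diff", "s1", "s2",
        "hdfs://localhost:8020/source/first",
        "hdfs://localhost:8020/target/" });
    if (options.shouldUseDiff()) {
      // Sync only what changed between source snapshots s1 and s2.
      System.out.println("diff copy from " + options.getFromSnapshot()
          + " to " + options.getToSnapshot());
    }

As the tests assert, parsing fails unless both snapshot names are present and both -update and -delete are supplied.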


[47/50] [abbrv] hadoop git commit: HADOOP-11673. Skip using JUnit Assume in TestCodec. Contributed by Brahma Reddy Battula.

Posted by ji...@apache.org.
HADOOP-11673. Skip using JUnit Assume in TestCodec.
Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/662781eb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/662781eb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/662781eb

Branch: refs/heads/HDFS-7285
Commit: 662781eb49a925dfaa842f5b00ff9404a8ad2052
Parents: a1e4dfe
Author: Chris Douglas <cd...@apache.org>
Authored: Sun Mar 8 19:15:46 2015 -0700
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:17:55 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt     |  3 +++
 .../org/apache/hadoop/io/compress/TestCodec.java    | 16 ++++------------
 2 files changed, 7 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/662781eb/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 16002d5..0af0beb 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -184,6 +184,9 @@ Trunk (Unreleased)
     HADOOP-11593. Convert site documentation from apt to markdown (stragglers)
     (Masatake Iwasaki via aw)
 
+    HADOOP-11673. Skip using JUnit Assume in TestCodec. (Brahma Reddy Battula
+    via cdouglas)
+
   BUG FIXES
 
     HADOOP-11473. test-patch says "-1 overall" even when all checks are +1

http://git-wip-us.apache.org/repos/asf/hadoop/blob/662781eb/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
index 98b3934..7246bf5 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
@@ -74,6 +74,7 @@ import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Test;
 import static org.junit.Assert.*;
+import static org.junit.Assume.*;
 
 public class TestCodec {
 
@@ -364,10 +365,7 @@ public class TestCodec {
   public void testCodecPoolGzipReuse() throws Exception {
     Configuration conf = new Configuration();
     conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
-    if (!ZlibFactory.isNativeZlibLoaded(conf)) {
-      LOG.warn("testCodecPoolGzipReuse skipped: native libs not loaded");
-      return;
-    }
+    assumeTrue(ZlibFactory.isNativeZlibLoaded(conf));
     GzipCodec gzc = ReflectionUtils.newInstance(GzipCodec.class, conf);
     DefaultCodec dfc = ReflectionUtils.newInstance(DefaultCodec.class, conf);
     Compressor c1 = CodecPool.getCompressor(gzc);
@@ -723,10 +721,7 @@ public class TestCodec {
   public void testNativeGzipConcat() throws IOException {
     Configuration conf = new Configuration();
     conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
-    if (!ZlibFactory.isNativeZlibLoaded(conf)) {
-      LOG.warn("skipped: native libs not loaded");
-      return;
-    }
+    assumeTrue(ZlibFactory.isNativeZlibLoaded(conf));
     GzipConcatTest(conf, GzipCodec.GzipZlibDecompressor.class);
   }
 
@@ -840,10 +835,7 @@ public class TestCodec {
     Configuration conf = new Configuration();
     conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, useNative);
     if (useNative) {
-      if (!ZlibFactory.isNativeZlibLoaded(conf)) {
-        LOG.warn("testGzipCodecWrite skipped: native libs not loaded");
-        return;
-      }
+      assumeTrue(ZlibFactory.isNativeZlibLoaded(conf));
     } else {
       assertFalse("ZlibFactory is using native libs against request",
           ZlibFactory.isNativeZlibLoaded(conf));
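
The behavioral difference is worth noting: the old early-return made a skipped test show up as passed, while a failed JUnit assumption (AssumptionViolatedException) makes runners report the test as skipped. A minimal sketch (not from the patch; the test name is made up) of the pattern now used throughout TestCodec:

    import static org.junit.Assume.assumeTrue;

    @Test
    public void testNeedsNativeZlib() {
      Configuration conf = new Configuration();
      conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
      // Marks the test skipped, rather than silently passed, when the
      // native library is unavailable on this machine.
      assumeTrue(ZlibFactory.isNativeZlibLoaded(conf));
      // ... assertions that require native zlib ...
    }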


[33/50] [abbrv] hadoop git commit: YARN-2190. Added CPU and memory limit options to the default container executor for Windows containers. Contributed by Chuan Liu

Posted by ji...@apache.org.
YARN-2190. Added CPU and memory limit options to the default container executor for Windows containers. Contributed by Chuan Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1752b659
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1752b659
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1752b659

Branch: refs/heads/HDFS-7285
Commit: 1752b65904f2a4f5dec33770a2924410d3e74295
Parents: 667c3fc
Author: Jian He <ji...@apache.org>
Authored: Fri Mar 6 14:17:57 2015 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:26 2015 -0700

----------------------------------------------------------------------
 BUILDING.txt                                    |   9 +-
 .../hadoop-common/src/main/winutils/task.c      | 144 ++++++++++++++++---
 .../src/main/winutils/win8sdk.props             |  28 ++++
 .../src/main/winutils/winutils.vcxproj          |   3 +
 .../org/apache/hadoop/util/TestWinUtils.java    |  62 ++++++++
 hadoop-yarn-project/CHANGES.txt                 |   3 +
 .../hadoop/yarn/conf/YarnConfiguration.java     |  12 ++
 .../src/main/resources/yarn-default.xml         |  14 ++
 .../server/nodemanager/ContainerExecutor.java   |  49 ++++++-
 .../nodemanager/DefaultContainerExecutor.java   |   9 +-
 .../WindowsSecureContainerExecutor.java         |   9 +-
 .../nodemanager/TestContainerExecutor.java      |  53 +++++++
 12 files changed, 360 insertions(+), 35 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1752b659/BUILDING.txt
----------------------------------------------------------------------
diff --git a/BUILDING.txt b/BUILDING.txt
index 6e38ad3..b60da6c 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -209,7 +209,8 @@ Requirements:
 * Findbugs 1.3.9 (if running findbugs)
 * ProtocolBuffer 2.5.0
 * CMake 2.6 or newer
-* Windows SDK or Visual Studio 2010 Professional
+* Windows SDK 7.1 or Visual Studio 2010 Professional
+* Windows SDK 8.1 (if building CPU rate control for the container executor)
 * zlib headers (if building native code bindings for zlib)
 * Internet connection for first build (to fetch all Maven and Hadoop dependencies)
 * Unix command-line tools from GnuWin32: sh, mkdir, rm, cp, tar, gzip. These
@@ -220,11 +221,15 @@ can be downloaded from http://git-scm.com/download/win.
 
 If using Visual Studio, it must be Visual Studio 2010 Professional (not 2012).
 Do not use Visual Studio Express.  It does not support compiling for 64-bit,
-which is problematic if running a 64-bit system.  The Windows SDK is free to
+which is problematic if running a 64-bit system.  The Windows SDK 7.1 is free to
 download here:
 
 http://www.microsoft.com/en-us/download/details.aspx?id=8279
 
+The Windows SDK 8.1 is available to download at:
+
+http://msdn.microsoft.com/en-us/windows/bg162891.aspx
+
 Cygwin is neither required nor supported.
 
 ----------------------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1752b659/hadoop-common-project/hadoop-common/src/main/winutils/task.c
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/task.c b/hadoop-common-project/hadoop-common/src/main/winutils/task.c
index 21b1893..37c6ca1 100644
--- a/hadoop-common-project/hadoop-common/src/main/winutils/task.c
+++ b/hadoop-common-project/hadoop-common/src/main/winutils/task.c
@@ -49,6 +49,31 @@ typedef enum TaskCommandOptionType
   TaskProcessList
 } TaskCommandOption;
 
+ //----------------------------------------------------------------------------
+// Function: GetLimit
+//
+// Description:
+//  Parse the resource limit value, as a long, from the command-line argument.
+//
+// Returns:
+// TRUE: if the value was parsed successfully
+// FALSE: otherwise
+static BOOL GetLimit(__in const wchar_t *str, __out long *value)
+{
+  wchar_t *end = NULL;
+  if (str == NULL || value == NULL) return FALSE;
+  *value = wcstol(str, &end, 10);
+  if (end == NULL || *end != '\0')
+  {
+    *value = -1;
+    return FALSE;
+  }
+  else
+  {
+    return TRUE;
+  }
+}
+
 //----------------------------------------------------------------------------
 // Function: ParseCommandLine
 //
@@ -61,7 +86,9 @@ typedef enum TaskCommandOptionType
 // FALSE: otherwise
 static BOOL ParseCommandLine(__in int argc,
                              __in_ecount(argc) wchar_t *argv[],
-                             __out TaskCommandOption *command)
+                             __out TaskCommandOption *command,
+                             __out_opt long *memory,
+                             __out_opt long *vcore)
 {
   *command = TaskInvalid;
 
@@ -88,9 +115,44 @@ static BOOL ParseCommandLine(__in int argc,
     }
   }
 
-  if (argc == 4) {
+  if (argc >= 4 && argc <= 8) {
     if (wcscmp(argv[1], L"create") == 0)
     {
+      int i;
+      for (i = 2; i < argc - 3; i++)
+      {
+        if (wcscmp(argv[i], L"-c") == 0)
+        {
+          if (vcore != NULL && !GetLimit(argv[i + 1], vcore))
+          {
+            return FALSE;
+          }
+          else
+          {
+            i++;
+            continue;
+          }
+        }
+        else if (wcscmp(argv[i], L"-m") == 0)
+        {
+          if (memory != NULL && !GetLimit(argv[i + 1], memory))
+          {
+            return FALSE;
+          }
+          else
+          {
+            i++;
+            continue;
+          }
+        }
+        else
+        {
+          break;
+        }
+      }
+      if (argc - i != 2)
+        return FALSE;
+
       *command = TaskCreate;
       return TRUE;
     }
@@ -573,7 +635,7 @@ done:
 // ERROR_SUCCESS: On success
 // GetLastError: otherwise
 DWORD CreateTaskImpl(__in_opt HANDLE logonHandle, __in PCWSTR jobObjName,__in PCWSTR cmdLine, 
-  __in LPCWSTR userName) 
+  __in LPCWSTR userName, __in long memory, __in long cpuRate)
 {
   DWORD dwErrorCode = ERROR_SUCCESS;
   DWORD exitCode = EXIT_FAILURE;
@@ -616,6 +678,12 @@ DWORD CreateTaskImpl(__in_opt HANDLE logonHandle, __in PCWSTR jobObjName,__in PC
     return dwErrorCode;
   }
   jeli.BasicLimitInformation.LimitFlags = JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE;
+  if (memory > 0)
+  {
+    jeli.BasicLimitInformation.LimitFlags |= JOB_OBJECT_LIMIT_JOB_MEMORY;
+    jeli.ProcessMemoryLimit = ((SIZE_T) memory) * 1024 * 1024;
+    jeli.JobMemoryLimit = ((SIZE_T) memory) * 1024 * 1024;
+  }
   if(SetInformationJobObject(jobObject, 
                              JobObjectExtendedLimitInformation, 
                              &jeli, 
@@ -626,6 +694,24 @@ DWORD CreateTaskImpl(__in_opt HANDLE logonHandle, __in PCWSTR jobObjName,__in PC
     CloseHandle(jobObject);
     return dwErrorCode;
   }
+#ifdef NTDDI_WIN8
+  if (cpuRate > 0)
+  {
+    JOBOBJECT_CPU_RATE_CONTROL_INFORMATION jcrci = { 0 };
+    SYSTEM_INFO sysinfo;
+    GetSystemInfo(&sysinfo);
+    jcrci.ControlFlags = JOB_OBJECT_CPU_RATE_CONTROL_ENABLE |
+      JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP;
+    jcrci.CpuRate = min(10000, cpuRate);
+    if(SetInformationJobObject(jobObject, JobObjectCpuRateControlInformation,
+          &jcrci, sizeof(jcrci)) == 0)
+    {
+      dwErrorCode = GetLastError();
+      CloseHandle(jobObject);
+      return dwErrorCode;
+    }
+  }
+#endif
 
   if (logonHandle != NULL) {
     dwErrorCode = AddNodeManagerAndUserACEsToObject(jobObject, userName, JOB_OBJECT_ALL_ACCESS);
@@ -809,10 +895,10 @@ create_process_done:
 // Returns:
 // ERROR_SUCCESS: On success
 // GetLastError: otherwise
-DWORD CreateTask(__in PCWSTR jobObjName,__in PWSTR cmdLine) 
+DWORD CreateTask(__in PCWSTR jobObjName,__in PWSTR cmdLine, __in long memory, __in long cpuRate)
 {
   // call with null logon in order to create tasks utilizing the current logon
-  return CreateTaskImpl( NULL, jobObjName, cmdLine, NULL);
+  return CreateTaskImpl( NULL, jobObjName, cmdLine, NULL, memory, cpuRate);
 }
 
 //----------------------------------------------------------------------------
@@ -893,7 +979,7 @@ DWORD CreateTaskAsUser(__in PCWSTR jobObjName,
       goto done;
   }
 
-  err = CreateTaskImpl(logonHandle, jobObjName, cmdLine, user);
+  err = CreateTaskImpl(logonHandle, jobObjName, cmdLine, user, -1, -1);
 
 done: 
   if( profileIsLoaded ) {
@@ -1095,6 +1181,8 @@ int Task(__in int argc, __in_ecount(argc) wchar_t *argv[])
 {
   DWORD dwErrorCode = ERROR_SUCCESS;
   TaskCommandOption command = TaskInvalid;
+  long memory = -1;
+  long cpuRate = -1;
   wchar_t* cmdLine = NULL;
   wchar_t buffer[16*1024] = L""; // 32K max command line
   size_t charCountBufferLeft = sizeof(buffer)/sizeof(wchar_t);
@@ -1111,7 +1199,7 @@ int Task(__in int argc, __in_ecount(argc) wchar_t *argv[])
                ARGC_COMMAND_ARGS
        };
 
-  if (!ParseCommandLine(argc, argv, &command)) {
+  if (!ParseCommandLine(argc, argv, &command, &memory, &cpuRate)) {
     dwErrorCode = ERROR_INVALID_COMMAND_LINE;
 
     fwprintf(stderr, L"Incorrect command line arguments.\n\n");
@@ -1123,7 +1211,7 @@ int Task(__in int argc, __in_ecount(argc) wchar_t *argv[])
   {
     // Create the task jobobject
     //
-    dwErrorCode = CreateTask(argv[2], argv[3]);
+    dwErrorCode = CreateTask(argv[argc-2], argv[argc-1], memory, cpuRate);
     if (dwErrorCode != ERROR_SUCCESS)
     {
       ReportErrorCode(L"CreateTask", dwErrorCode);
@@ -1238,18 +1326,30 @@ void TaskUsage()
   // jobobject's are being used.
   // ProcessTree.isSetsidSupported()
   fwprintf(stdout, L"\
-    Usage: task create [TASKNAME] [COMMAND_LINE] |\n\
-          task createAsUser [TASKNAME] [USERNAME] [PIDFILE] [COMMAND_LINE] |\n\
-          task isAlive [TASKNAME] |\n\
-          task kill [TASKNAME]\n\
-          task processList [TASKNAME]\n\
-    Creates a new task jobobject with taskname\n\
-    Creates a new task jobobject with taskname as the user provided\n\
-    Checks if task jobobject is alive\n\
-    Kills task jobobject\n\
-    Prints to stdout a list of processes in the task\n\
-    along with their resource usage. One process per line\n\
-    and comma separated info per process\n\
-    ProcessId,VirtualMemoryCommitted(bytes),\n\
-    WorkingSetSize(bytes),CpuTime(Millisec,Kernel+User)\n");
+Usage: task create [OPTIONS] [TASKNAME] [COMMAND_LINE]\n\
+         Creates a new task job object with taskname and options to set CPU\n\
+         and memory limits on the job object\n\
+\n\
+         OPTIONS: -c [cpu rate] set the cpu rate limit on the job object.\n\
+                  -m [memory] set the memory limit on the job object.\n\
+         The cpu limit is an integral value of percentage * 100. The memory\n\
+         limit is an integral number of megabytes.\n\
+         The limit is not set if a zero or negative value is passed as the\n\
+         parameter.\n\
+\n\
+       task createAsUser [TASKNAME] [USERNAME] [PIDFILE] [COMMAND_LINE]\n\
+         Creates a new task job object with taskname, running as the provided user\n\
+\n\
+       task isAlive [TASKNAME]\n\
+         Checks if task job object is alive\n\
+\n\
+       task kill [TASKNAME]\n\
+         Kills task job object\n\
+\n\
+       task processList [TASKNAME]\n\
+         Prints to stdout a list of processes in the task\n\
+         along with their resource usage. One process per line\n\
+         and comma separated info per process\n\
+         ProcessId,VirtualMemoryCommitted(bytes),\n\
+         WorkingSetSize(bytes),CpuTime(Millisec,Kernel+User)\n");
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1752b659/hadoop-common-project/hadoop-common/src/main/winutils/win8sdk.props
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/win8sdk.props b/hadoop-common-project/hadoop-common/src/main/winutils/win8sdk.props
new file mode 100644
index 0000000..503b37a
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/winutils/win8sdk.props
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ImportGroup Label="PropertySheets" />
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup>
+   <ExecutablePath>$(VCInstallDir)bin\x86_amd64;$(VCInstallDir)bin;$(WindowsSdkDir)bin\NETFX 4.0 Tools;$(MSBuildProgramFiles32)\Windows Kits\8.1\bin\x86;$(VSInstallDir)Common7\Tools\bin;$(VSInstallDir)Common7\tools;$(VSInstallDir)Common7\ide;$(MSBuildProgramFiles32)\HTML Help Workshop;$(FrameworkSDKDir)\bin;$(MSBuildToolsPath32);$(VSInstallDir);$(SystemRoot)\SysWow64;$(FxCopDir);$(PATH)</ExecutablePath>
+   <IncludePath>$(MSBuildProgramFiles32)\Windows Kits\8.1\Include\um;$(MSBuildProgramFiles32)\Windows Kits\8.1\Include\shared;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(FrameworkSDKDir)\include;</IncludePath>
+   <LibraryPath>$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(MSBuildProgramFiles32)\Windows Kits\8.1\lib\win8\um\x64;$(MSBuildProgramFiles32)\Windows Kits\8.1\Lib\winv6.3\um\x64;$(FrameworkSDKDir)\lib\x64</LibraryPath>
+   <ExcludePath>$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(MSBuildProgramFiles32)\Windows Kits\8.1\Include\um;$(MSBuildProgramFiles32)\Windows Kits\8.1\Include\shared;$(FrameworkSDKDir)\include;$(MSBuildToolsPath32);$(VCInstallDir)atlmfc\lib;$(VCInstallDir)lib;</ExcludePath>
+ </PropertyGroup>
+<ItemDefinitionGroup />
+</Project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1752b659/hadoop-common-project/hadoop-common/src/main/winutils/winutils.vcxproj
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/winutils.vcxproj b/hadoop-common-project/hadoop-common/src/main/winutils/winutils.vcxproj
index 9ecba0a..76a7414 100644
--- a/hadoop-common-project/hadoop-common/src/main/winutils/winutils.vcxproj
+++ b/hadoop-common-project/hadoop-common/src/main/winutils/winutils.vcxproj
@@ -67,6 +67,9 @@
   </PropertyGroup>
   <ImportGroup Label="ExtensionSettings">
   </ImportGroup>
+  <ImportGroup Label="PropertySheets" Condition="exists('$(MSBuildProgramFiles32)\Windows Kits\8.1')">
+    <Import Project="win8sdk.props" />
+  </ImportGroup>
   <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
     <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
   </ImportGroup>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1752b659/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java
index 8ac6e40..987c706 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java
@@ -547,4 +547,66 @@ public class TestWinUtils {
     
     assertThat(outNumber, containsString(testNumber));
   }
+
+  @Test (timeout = 30000)
+  public void testTaskCreateWithLimits() throws IOException {
+    // Generate a unique job id
+    String jobId = String.format("%f", Math.random());
+
+    // Run a task without any options
+    String out = Shell.execCommand(Shell.WINUTILS, "task", "create",
+        "job" + jobId, "cmd /c echo job" + jobId);
+    assertTrue(out.trim().equals("job" + jobId));
+
+    // Run a task without any limits
+    jobId = String.format("%f", Math.random());
+    out = Shell.execCommand(Shell.WINUTILS, "task", "create", "-c", "-1", "-m",
+        "-1", "job" + jobId, "cmd /c echo job" + jobId);
+    assertTrue(out.trim().equals("job" + jobId));
+
+    // Run a task with limits (128MB should be enough for a cmd)
+    jobId = String.format("%f", Math.random());
+    out = Shell.execCommand(Shell.WINUTILS, "task", "create", "-c", "10000", "-m",
+        "128", "job" + jobId, "cmd /c echo job" + jobId);
+    assertTrue(out.trim().equals("job" + jobId));
+
+    // Run a task without enough memory
+    try {
+      jobId = String.format("%f", Math.random());
+      out = Shell.execCommand(Shell.WINUTILS, "task", "create", "-m", "128", "job"
+          + jobId, "java -Xmx256m -version");
+      fail("Failed to get Shell.ExitCodeException with insufficient memory");
+    } catch (Shell.ExitCodeException ece) {
+      assertThat(ece.getExitCode(), is(1));
+    }
+
+    // Run tasks with wrong parameters
+    //
+    try {
+      jobId = String.format("%f", Math.random());
+      Shell.execCommand(Shell.WINUTILS, "task", "create", "-c", "-1", "-m",
+          "-1", "foo", "job" + jobId, "cmd /c echo job" + jobId);
+      fail("Failed to get Shell.ExitCodeException with bad parameters");
+    } catch (Shell.ExitCodeException ece) {
+      assertThat(ece.getExitCode(), is(1639));
+    }
+
+    try {
+      jobId = String.format("%f", Math.random());
+      Shell.execCommand(Shell.WINUTILS, "task", "create", "-c", "-m", "-1",
+          "job" + jobId, "cmd /c echo job" + jobId);
+      fail("Failed to get Shell.ExitCodeException with bad parameters");
+    } catch (Shell.ExitCodeException ece) {
+      assertThat(ece.getExitCode(), is(1639));
+    }
+
+    try {
+      jobId = String.format("%f", Math.random());
+      Shell.execCommand(Shell.WINUTILS, "task", "create", "-c", "foo",
+          "job" + jobId, "cmd /c echo job" + jobId);
+      fail("Failed to get Shell.ExitCodeException with bad parameters");
+    } catch (Shell.ExitCodeException ece) {
+      assertThat(ece.getExitCode(), is(1639));
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1752b659/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d073169..c2aa2ef 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -363,6 +363,9 @@ Release 2.7.0 - UNRELEASED
     YARN-1809. Synchronize RM and TimeLineServer Web-UIs. (Zhijie Shen and
     Xuan Gong via jianhe)
 
+    YARN-2190. Added CPU and memory limit options to the default container
+    executor for Windows containers. (Chuan Liu via jianhe)
+
   OPTIMIZATIONS
 
     YARN-2990. FairScheduler's delay-scheduling always waits for node-local and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1752b659/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 25b808e..8c83fea 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1027,6 +1027,18 @@ public class YarnConfiguration extends Configuration {
   public static final long DEFAULT_NM_LINUX_CONTAINER_CGROUPS_DELETE_DELAY =
       20;
 
+  /**
+   * Indicates if memory and CPU limits will be set for the Windows Job
+   * Object for the containers launched by the default container executor.
+   */
+  public static final String NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED =
+      NM_PREFIX + "windows-container.memory-limit.enabled";
+  public static final boolean DEFAULT_NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED = false;
+
+  public static final String NM_WINDOWS_CONTAINER_CPU_LIMIT_ENABLED =
+      NM_PREFIX + "windows-container.cpu-limit.enabled";
+  public static final boolean DEFAULT_NM_WINDOWS_CONTAINER_CPU_LIMIT_ENABLED = false;
+
   /** 
   /* The Windows group that the windows-secure-container-executor should run as.
   */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1752b659/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index df730d5..66400c8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1075,6 +1075,20 @@
   </property>
 
   <property>
+    <description>This flag determines whether a memory limit will be set for the Windows Job
+    Object of the containers launched by the default container executor.</description>
+    <name>yarn.nodemanager.windows-container.memory-limit.enabled</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <description>This flag determines whether a CPU limit will be set for the Windows Job
+    Object of the containers launched by the default container executor.</description>
+    <name>yarn.nodemanager.windows-container.cpu-limit.enabled</name>
+    <value>false</value>
+  </property>
+
+  <property>
     <description>T-file compression types used to compress aggregated logs.</description>
     <name>yarn.nodemanager.log-aggregation.compression-type</name>
     <value>none</value>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1752b659/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index 77193df..248a393 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent;
@@ -298,6 +299,11 @@ public abstract class ContainerExecutor implements Configurable {
       readLock.unlock();
     }
   }
+
+  protected String[] getRunCommand(String command, String groupId,
+      String userName, Path pidFile, Configuration conf) {
+    return getRunCommand(command, groupId, userName, pidFile, conf, null);
+  }
   
   /** 
    *  Return a command to execute the given command in OS shell.
@@ -306,7 +312,7 @@ public abstract class ContainerExecutor implements Configurable {
    *  non-Windows, groupId is ignored. 
    */
   protected String[] getRunCommand(String command, String groupId,
-      String userName, Path pidFile, Configuration conf) {
+      String userName, Path pidFile, Configuration conf, Resource resource) {
     boolean containerSchedPriorityIsSet = false;
     int containerSchedPriorityAdjustment = 
         YarnConfiguration.DEFAULT_NM_CONTAINER_EXECUTOR_SCHED_PRIORITY;
@@ -320,7 +326,46 @@ public abstract class ContainerExecutor implements Configurable {
     }
   
     if (Shell.WINDOWS) {
-      return new String[] { Shell.WINUTILS, "task", "create", groupId,
+      int cpuRate = -1;
+      int memory = -1;
+      if (resource != null) {
+        if (conf
+            .getBoolean(
+                YarnConfiguration.NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED,
+                YarnConfiguration.DEFAULT_NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED)) {
+          memory = resource.getMemory();
+        }
+
+        if (conf.getBoolean(
+            YarnConfiguration.NM_WINDOWS_CONTAINER_CPU_LIMIT_ENABLED,
+            YarnConfiguration.DEFAULT_NM_WINDOWS_CONTAINER_CPU_LIMIT_ENABLED)) {
+          int containerVCores = resource.getVirtualCores();
+          int nodeVCores = conf.getInt(YarnConfiguration.NM_VCORES,
+              YarnConfiguration.DEFAULT_NM_VCORES);
+          // cap overall usage to the number of cores allocated to YARN
+          int nodeCpuPercentage = Math
+              .min(
+                  conf.getInt(
+                      YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT,
+                      YarnConfiguration.DEFAULT_NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT),
+                  100);
+          nodeCpuPercentage = Math.max(0, nodeCpuPercentage);
+          if (nodeCpuPercentage == 0) {
+            String message = "Illegal value for "
+                + YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT
+                + ". Value cannot be less than or equal to 0.";
+            throw new IllegalArgumentException(message);
+          }
+          float yarnVCores = (nodeCpuPercentage * nodeVCores) / 100.0f;
+          // CPU should be set to a percentage * 100, e.g. 20% cpu rate limit
+          // should be set as 20 * 100. The following setting is equal to:
+          // 100 * (100 * (vcores / Total # of cores allocated to YARN))
+          cpuRate = Math.min(10000,
+              (int) ((containerVCores * 10000) / yarnVCores));
+        }
+      }
+      return new String[] { Shell.WINUTILS, "task", "create", "-m",
+          String.valueOf(memory), "-c", String.valueOf(cpuRate), groupId,
           "cmd /c " + command };
     } else {
       List<String> retCommand = new ArrayList<String>();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1752b659/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
index f3d2121..e0ecea3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.util.Shell.CommandExecutor;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
@@ -202,7 +203,7 @@ public class DefaultContainerExecutor extends ContainerExecutor {
       setScriptExecutable(sb.getWrapperScriptPath(), user);
 
       shExec = buildCommandExecutor(sb.getWrapperScriptPath().toString(),
-          containerIdStr, user, pidFile,
+          containerIdStr, user, pidFile, container.getResource(),
           new File(containerWorkDir.toUri().getPath()),
           container.getLaunchContext().getEnvironment());
       
@@ -256,12 +257,12 @@ public class DefaultContainerExecutor extends ContainerExecutor {
   }
 
   protected CommandExecutor buildCommandExecutor(String wrapperScriptPath, 
-      String containerIdStr, String user, Path pidFile, File wordDir, 
-      Map<String, String> environment) 
+      String containerIdStr, String user, Path pidFile, Resource resource,
+      File wordDir, Map<String, String> environment)
           throws IOException {
     
     String[] command = getRunCommand(wrapperScriptPath,
-        containerIdStr, user, pidFile, this.getConf());
+        containerIdStr, user, pidFile, this.getConf(), resource);
 
       LOG.info("launchContainer: " + Arrays.toString(command));
       return new ShellCommandExecutor(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1752b659/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java
index cd3e71a..b7bec5f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.io.nativeio.NativeIOException;
 import org.apache.hadoop.util.NativeCodeLoader;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Shell.CommandExecutor;
+import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService;
@@ -727,11 +728,9 @@ public class WindowsSecureContainerExecutor extends DefaultContainerExecutor {
    }
  
    @Override
-   protected CommandExecutor buildCommandExecutor(String wrapperScriptPath, 
-       String containerIdStr,
-     String userName, Path pidFile,File wordDir, Map<String, String> environment) 
-     throws IOException {
-
+  protected CommandExecutor buildCommandExecutor(String wrapperScriptPath,
+      String containerIdStr, String userName, Path pidFile, Resource resource,
+      File wordDir, Map<String, String> environment) throws IOException {
      return new WintuilsProcessStubExecutor(
          wordDir.toString(),
          containerIdStr, userName, pidFile.toString(), 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1752b659/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerExecutor.java
index fd3634b..dc3e941 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerExecutor.java
@@ -18,13 +18,21 @@
 
 package org.apache.hadoop.yarn.server.nodemanager;
 
+import java.util.Arrays;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 
 
+import org.apache.hadoop.yarn.server.nodemanager.util.NodeManagerHardwareUtils;
+import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
+import org.junit.Assert;
 import org.junit.Test;
+
 import static org.junit.Assert.*;
+import static org.junit.Assume.assumeTrue;
 
 public class TestContainerExecutor {
   
@@ -69,4 +77,49 @@ public class TestContainerExecutor {
     }
   }
 
+  @Test (timeout = 5000)
+  public void testRunCommandWithNoResources() {
+    // Windows only test
+    assumeTrue(Shell.WINDOWS);
+    Configuration conf = new Configuration();
+    String[] command = containerExecutor.getRunCommand("echo", "group1", null, null,
+        conf, Resource.newInstance(1024, 1));
+    // Assert the cpu and memory limits are set correctly in the command
+    String[] expected = { Shell.WINUTILS, "task", "create", "-m", "-1", "-c",
+        "-1", "group1", "cmd /c " + "echo" };
+    Assert.assertTrue(Arrays.equals(expected, command));
+  }
+
+  @Test (timeout = 5000)
+  public void testRunCommandWithMemoryOnlyResources() {
+    // Windows only test
+    assumeTrue(Shell.WINDOWS);
+    Configuration conf = new Configuration();
+    conf.set(YarnConfiguration.NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED, "true");
+    String[] command = containerExecutor.getRunCommand("echo", "group1", null, null,
+        conf, Resource.newInstance(1024, 1));
+    // Assert the cpu and memory limits are set correctly in the command
+    String[] expected = { Shell.WINUTILS, "task", "create", "-m", "1024", "-c",
+        "-1", "group1", "cmd /c " + "echo" };
+    Assert.assertTrue(Arrays.equals(expected, command));
+  }
+
+  @Test (timeout = 5000)
+  public void testRunCommandWithCpuAndMemoryResources() {
+    // Windows only test
+    assumeTrue(Shell.WINDOWS);
+    Configuration conf = new Configuration();
+    conf.set(YarnConfiguration.NM_WINDOWS_CONTAINER_CPU_LIMIT_ENABLED, "true");
+    conf.set(YarnConfiguration.NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED, "true");
+    String[] command = containerExecutor.getRunCommand("echo", "group1", null, null,
+        conf, Resource.newInstance(1024, 1));
+    float yarnProcessors = NodeManagerHardwareUtils.getContainersCores(
+        ResourceCalculatorPlugin.getResourceCalculatorPlugin(null, conf),
+        conf);
+    int cpuRate = Math.min(10000, (int) ((1 * 10000) / yarnProcessors));
+    // Assert the cpu and memory limits are set correctly in the command
+    String[] expected = { Shell.WINUTILS, "task", "create", "-m", "1024", "-c",
+        String.valueOf(cpuRate), "group1", "cmd /c " + "echo" };
+    Assert.assertTrue(Arrays.equals(expected, command));
+  }
 }
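
To make the vcore-to-cpu-rate mapping in ContainerExecutor#getRunCommand concrete, a worked example with illustrative values (not from the patch):

    int containerVCores = 2;     // vcores requested by the container
    int nodeVCores = 8;          // yarn.nodemanager.resource.cpu-vcores
    int nodeCpuPercentage = 100; // share of physical CPU given to YARN
    float yarnVCores = (nodeCpuPercentage * nodeVCores) / 100.0f;  // 8.0
    int cpuRate = Math.min(10000, (int) ((containerVCores * 10000) / yarnVCores));
    // cpuRate == 2500: winutils receives "-c 2500" and applies a hard cap of
    // 25% of the machine's CPU, since the job object rate unit is percent * 100.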


[06/50] [abbrv] hadoop git commit: MAPREDUCE-5657. Fix Javadoc errors caused by incorrect or illegal tags in doc comments. Contributed by Akira AJISAKA.

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java
index fa3708e..2c69542 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java
@@ -181,7 +181,7 @@ public static final String OUTDIR = "mapreduce.output.fileoutputformat.outputdir
    *  Get the {@link Path} to the task's temporary output directory 
    *  for the map-reduce job
    *  
-   * <h4 id="SideEffectFiles">Tasks' Side-Effect Files</h4>
+   * <b id="SideEffectFiles">Tasks' Side-Effect Files</b>
    * 
    * <p>Some applications need to create/write-to side-files, which differ from
    * the actual job-outputs.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MultipleOutputs.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MultipleOutputs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MultipleOutputs.java
index 24baa59..c31cab7 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MultipleOutputs.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MultipleOutputs.java
@@ -81,7 +81,7 @@ import java.util.*;
  * <p>
  * Usage in Reducer:
  * <pre>
- * <K, V> String generateFileName(K k, V v) {
+ * &lt;K, V&gt; String generateFileName(K k, V v) {
  *   return k.toString() + "_" + v.toString();
  * }
  * 
@@ -124,16 +124,16 @@ import java.util.*;
  * </p>
  * 
  * <pre>
- * private MultipleOutputs<Text, Text> out;
+ * private MultipleOutputs&lt;Text, Text&gt; out;
  * 
  * public void setup(Context context) {
- *   out = new MultipleOutputs<Text, Text>(context);
+ *   out = new MultipleOutputs&lt;Text, Text&gt;(context);
  *   ...
  * }
  * 
- * public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
+ * public void reduce(Text key, Iterable&lt;Text&gt; values, Context context) throws IOException, InterruptedException {
  * for (Text t : values) {
- *   out.write(key, t, generateFileName(<<i>parameter list...</i>>));
+ *   out.write(key, t, generateFileName(&lt;<i>parameter list...</i>&gt;));
  *   }
  * }
  * 
@@ -294,7 +294,6 @@ public class MultipleOutputs<KEYOUT, VALUEOUT> {
 
   /**
    * Adds a named output for the job.
-   * <p/>
    *
    * @param job               job to add the named output
    * @param namedOutput       named output name, it has to be a word, letters

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/BinaryPartitioner.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/BinaryPartitioner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/BinaryPartitioner.java
index 4a40840..2a89908 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/BinaryPartitioner.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/BinaryPartitioner.java
@@ -64,7 +64,7 @@ import org.apache.hadoop.mapreduce.Partitioner;
  *   <li>{@link #setOffsets}</li>
  *   <li>{@link #setLeftOffset}</li>
  *   <li>{@link #setRightOffset}</li>
- * </ul></p>
+ * </ul>
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/JobContextImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/JobContextImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/JobContextImpl.java
index 247c2f2..b9014ef 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/JobContextImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/JobContextImpl.java
@@ -374,7 +374,6 @@ public class JobContextImpl implements JobContext {
    * Get the timestamps of the archives.  Used by internal
    * DistributedCache and MapReduce code.
    * @return a string array of timestamps 
-   * @throws IOException
    */
   public String[] getArchiveTimestamps() {
     return toTimestampStrs(DistributedCache.getArchiveTimestamps(conf));
@@ -384,7 +383,6 @@ public class JobContextImpl implements JobContext {
    * Get the timestamps of the files.  Used by internal
    * DistributedCache and MapReduce code.
    * @return a string array of timestamps 
-   * @throws IOException
    */
   public String[] getFileTimestamps() {
     return toTimestampStrs(DistributedCache.getFileTimestamps(conf));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/RandomTextWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/RandomTextWriter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/RandomTextWriter.java
index 40e101a..6cb3211 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/RandomTextWriter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/RandomTextWriter.java
@@ -42,7 +42,7 @@ import org.apache.hadoop.util.ToolRunner;
  * random sequence of words.
  * In order for this program to generate data for terasort with a 5-10 words
  * per key and 20-100 words per value, have the following config:
- * <xmp>
+ * <pre>{@code
  * <?xml version="1.0"?>
  * <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
  * <configuration>
@@ -66,7 +66,7 @@ import org.apache.hadoop.util.ToolRunner;
  *     <name>mapreduce.randomtextwriter.totalbytes</name>
  *     <value>1099511627776</value>
  *   </property>
- * </configuration></xmp>
+ * </configuration>}</pre>
  * 
  * Equivalently, {@link RandomTextWriter} also supports all the above options
  * and ones supported by {@link Tool} via the command-line.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/RandomWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/RandomWriter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/RandomWriter.java
index a326c8c..67c9ca8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/RandomWriter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/RandomWriter.java
@@ -47,7 +47,7 @@ import org.apache.hadoop.util.ToolRunner;
  * random binary sequence file of BytesWritable.
  * In order for this program to generate data for terasort with 10-byte keys
  * and 90-byte values, have the following config:
- * <xmp>
+ * <pre>{@code
  * <?xml version="1.0"?>
  * <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
  * <configuration>
@@ -71,8 +71,7 @@ import org.apache.hadoop.util.ToolRunner;
  *     <name>mapreduce.randomwriter.totalbytes</name>
  *     <value>1099511627776</value>
  *   </property>
- * </configuration></xmp>
- * 
+ * </configuration>}</pre>
  * Equivalently, {@link RandomWriter} also supports all the above options
  * and ones supported by {@link GenericOptionsParser} via the command-line.
  */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/MultiFileWordCount.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/MultiFileWordCount.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/MultiFileWordCount.java
index d3df4b3..b51946e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/MultiFileWordCount.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/MultiFileWordCount.java
@@ -199,7 +199,7 @@ public class MultiFileWordCount extends Configured implements Tool {
   }
 
   /**
-   * This Mapper is similar to the one in {@link WordCount.MapClass}.
+   * This Mapper is similar to the one in {@link WordCount.TokenizerMapper}.
    */
   public static class MapClass extends 
       Mapper<WordOffset, Text, Text, IntWritable> {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/QuasiMonteCarlo.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/QuasiMonteCarlo.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/QuasiMonteCarlo.java
index d565098..25dee6b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/QuasiMonteCarlo.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/QuasiMonteCarlo.java
@@ -50,7 +50,7 @@ import org.apache.hadoop.util.ToolRunner;
  * where $S=[0,1)^2$ is a unit square,
  * $x=(x_1,x_2)$ is a 2-dimensional point,
  * and $f$ is a function describing the inscribed circle of the square $S$,
- * $f(x)=1$ if $(2x_1-1)^2+(2x_2-1)^2 <= 1$ and $f(x)=0$, otherwise.
+ * $f(x)=1$ if $(2x_1-1)^2+(2x_2-1)^2 &lt;= 1$ and $f(x)=0$, otherwise.
  * It is easy to see that Pi is equal to $4I$.
  * So an approximation of Pi is obtained once $I$ is evaluated numerically.
  * 
@@ -155,7 +155,7 @@ public class QuasiMonteCarlo extends Configured implements Tool {
     /** Map method.
      * @param offset samples starting from the (offset+1)th sample.
      * @param size the number of samples for this map
-     * @param context output {ture->numInside, false->numOutside}
+     * @param context output {true-&gt;numInside, false-&gt;numOutside}
      */
     public void map(LongWritable offset,
                     LongWritable size,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/RandomTextWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/RandomTextWriter.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/RandomTextWriter.java
index 4d555c6..6309ee6 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/RandomTextWriter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/RandomTextWriter.java
@@ -42,7 +42,7 @@ import org.apache.hadoop.util.ToolRunner;
  * random sequence of words.
  * In order for this program to generate data for terasort with a 5-10 words
  * per key and 20-100 words per value, have the following config:
- * <xmp>
+ * <pre>{@code
  * <?xml version="1.0"?>
  * <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
  * <configuration>
@@ -66,7 +66,7 @@ import org.apache.hadoop.util.ToolRunner;
  *     <name>mapreduce.randomtextwriter.totalbytes</name>
  *     <value>1099511627776</value>
  *   </property>
- * </configuration></xmp>
+ * </configuration>}</pre>
  * 
  * Equivalently, {@link RandomTextWriter} also supports all the above options
  * and ones supported by {@link Tool} via the command-line.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/RandomWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/RandomWriter.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/RandomWriter.java
index e1c13ec..8f322b1 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/RandomWriter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/RandomWriter.java
@@ -47,7 +47,7 @@ import org.apache.hadoop.util.ToolRunner;
  * random binary sequence file of BytesWritable.
  * In order for this program to generate data for terasort with 10-byte keys
  * and 90-byte values, have the following config:
- * <xmp>
+ * <pre>{@code
  * <?xml version="1.0"?>
  * <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
  * <configuration>
@@ -71,8 +71,7 @@ import org.apache.hadoop.util.ToolRunner;
  *     <name>mapreduce.randomwriter.totalbytes</name>
  *     <value>1099511627776</value>
  *   </property>
- * </configuration></xmp>
- * 
+ * </configuration>}</pre>
  * Equivalently, {@link RandomWriter} also supports all the above options
  * and ones supported by {@link GenericOptionsParser} via the command-line.
  */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/SecondarySort.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/SecondarySort.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/SecondarySort.java
index d536ec9..8841fdc 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/SecondarySort.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/SecondarySort.java
@@ -74,7 +74,7 @@ public class SecondarySort {
     }
     /**
      * Read the two integers. 
-     * Encoded as: MIN_VALUE -> 0, 0 -> -MIN_VALUE, MAX_VALUE-> -1
+     * Encoded as: MIN_VALUE -&gt; 0, 0 -&gt; -MIN_VALUE, MAX_VALUE-&gt; -1
      */
     @Override
     public void readFields(DataInput in) throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/DistBbp.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/DistBbp.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/DistBbp.java
index 4484d20..268066c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/DistBbp.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/DistBbp.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.util.ToolRunner;
  * A map/reduce program that uses a BBP-type method to compute exact 
  * binary digits of Pi.
  * This program is designed for computing the n-th bit of Pi,
- * for large n, say n >= 10^8.
+ * for large n, say n &gt;= 10^8.
  * For computing lower bits of Pi, consider using bbp.
  *
  * The actually computation is done by DistSum jobs.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/Modular.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/Modular.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/Modular.java
index 58f859d..1c039a2 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/Modular.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/Modular.java
@@ -78,7 +78,7 @@ public class Modular {
     return x >= 1? x - 1: x < 0? x + 1: x;
   }
 
-  /** Given 0 < x < y,
+  /** Given 0 &lt; x &lt; y,
    * return x^(-1) mod y.
    */
   public static long modInverse(final long x, final long y) {

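An alternative to entity escaping, not the route this commit takes, is the inline {@code ...} tag, which renders the comparison literally. A self-contained sketch under that style, assuming nothing from the patch beyond the documented contract:

    public final class ModularSketch {
      /**
       * Given {@code 0 < x < y} with gcd(x, y) == 1,
       * return x^(-1) mod y (iterative extended Euclid).
       */
      public static long modInverse(long x, long y) {
        long t = 0, newT = 1;
        long r = y, newR = x % y;
        while (newR != 0) {
          long q = r / newR;
          long tmp = t - q * newT; t = newT; newT = tmp;
          tmp = r - q * newR; r = newR; newR = tmp;
        }
        // Normalize the Bezout coefficient into [0, y).
        return ((t % y) + y) % y;
      }
    }
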
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/GenSort.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/GenSort.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/GenSort.java
index 94f9baa..beb0743 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/GenSort.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/GenSort.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.util.PureJavaCrc32;
 
 /** 
  * A single process data generator for the terasort data. Based on gensort.c 
- * version 1.1 (3 Mar 2009) from Chris Nyberg <ch...@ordinal.com>.
+ * version 1.1 (3 Mar 2009) from Chris Nyberg &lt;chris.nyberg@ordinal.com&gt;.
  */
 public class GenSort {
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
index ab5b802..a7b68a9 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
@@ -38,10 +38,10 @@ import com.google.common.collect.Sets;
 /**
  * The CopyListing abstraction is responsible for how the list of
  * sources and targets is constructed, for DistCp's copy function.
- * The copy-listing should be a SequenceFile<Text, CopyListingFileStatus>,
- * located at the path specified to buildListing(),
- * each entry being a pair of (Source relative path, source file status),
- * all the paths being fully qualified.
+ * The copy-listing should be a
+ * SequenceFile&lt;Text, CopyListingFileStatus&gt;, located at the path
+ * specified to buildListing(), each entry being a pair of (Source relative
+ * path, source file status), all the paths being fully qualified.
  */
 public abstract class CopyListing extends Configured {
 
@@ -95,8 +95,8 @@ public abstract class CopyListing extends Configured {
    * Validate input and output paths
    *
    * @param options - Input options
-   * @throws InvalidInputException: If inputs are invalid
-   * @throws IOException: any Exception with FS 
+   * @throws InvalidInputException If inputs are invalid
+   * @throws IOException any Exception with FS
    */
   protected abstract void validatePaths(DistCpOptions options)
       throws IOException, InvalidInputException;
@@ -105,7 +105,7 @@ public abstract class CopyListing extends Configured {
    * The interface to be implemented by sub-classes, to create the source/target file listing.
    * @param pathToListFile Path on HDFS where the listing file is written.
    * @param options Input Options for DistCp (indicating source/target paths.)
-   * @throws IOException: Thrown on failure to create the listing file.
+   * @throws IOException Thrown on failure to create the listing file.
    */
   protected abstract void doBuildListing(Path pathToListFile,
                                          DistCpOptions options) throws IOException;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
index d202f0a..28535a7 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
@@ -76,7 +76,7 @@ public class DistCp extends Configured implements Tool {
    * (E.g. source-paths, target-location, etc.)
    * @param inputOptions Options (indicating source-paths, target-location.)
    * @param configuration The Hadoop configuration against which the Copy-mapper must run.
-   * @throws Exception, on failure.
+   * @throws Exception
    */
   public DistCp(Configuration configuration, DistCpOptions inputOptions) throws Exception {
     Configuration config = new Configuration(configuration);
@@ -142,7 +142,7 @@ public class DistCp extends Configured implements Tool {
    * Implements the core-execution. Creates the file-list for copy,
    * and launches the Hadoop-job, to do the copy.
    * @return Job handle
-   * @throws Exception, on failure.
+   * @throws Exception
    */
   public Job execute() throws Exception {
     assert inputOptions != null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
index d263f82..159d5ca 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
@@ -105,7 +105,7 @@ public enum DistCpOptionSwitch {
    * Copy all the source files and commit them atomically to the target
    * This is typically useful in cases where there is a process
    * polling for availability of a file/dir. This option is incompatible
-   * with SYNC_FOLDERS & DELETE_MISSING
+   * with SYNC_FOLDERS and DELETE_MISSING
    */
   ATOMIC_COMMIT(DistCpConstants.CONF_LABEL_ATOMIC_COPY,
       new Option("atomic", false, "Commit all changes or none")),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java
index 4bbc30d..525136c 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java
@@ -63,7 +63,7 @@ public class OptionsParser {
    * @param args Command-line arguments (excluding the options consumed
    *              by the GenericOptionsParser).
    * @return The Options object, corresponding to the specified command-line.
-   * @throws IllegalArgumentException: Thrown if the parse fails.
+   * @throws IllegalArgumentException Thrown if the parse fails.
    */
   public static DistCpOptions parse(String args[]) throws IllegalArgumentException {
 

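A recurring fix in this commit: the @throws tag takes the exception type followed directly by free-form text, so a trailing colon or comma after the type makes doclint treat the tag as malformed. A hedged illustration with an invented method:

    public final class ThrowsTagExample {
      /**
       * Return a defensive copy of the arguments (illustrative only).
       *
       * @param args command-line arguments
       * @return a copy of the arguments
       * @throws IllegalArgumentException if args is null
       */
      public static String[] checkedCopy(String[] args) {
        if (args == null) {
          throw new IllegalArgumentException("args must not be null");
        }
        return args.clone();
      }
    }
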
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
index 197edd9..d5fdd7f 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
@@ -70,7 +70,7 @@ public class CopyCommitter extends FileOutputCommitter {
     this.taskAttemptContext = context;
   }
 
-  /** @inheritDoc */
+  /** {@inheritDoc} */
   @Override
   public void commitJob(JobContext jobContext) throws IOException {
     Configuration conf = jobContext.getConfiguration();
@@ -102,7 +102,7 @@ public class CopyCommitter extends FileOutputCommitter {
     }
   }
 
-  /** @inheritDoc */
+  /** {@inheritDoc} */
   @Override
   public void abortJob(JobContext jobContext,
                        JobStatus.State state) throws IOException {

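The change here is the braces: @inheritDoc is an inline tag, so the bare block form is ignored by the Javadoc tool, while {@inheritDoc} pulls in the overridden method's description. A small self-contained illustration (class name invented):

    public final class LoggingCloseable implements AutoCloseable {
      /** {@inheritDoc} Also logs the call before returning. */
      @Override
      public void close() {
        System.out.println("closed");
      }
    }
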
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java
index ab57127..cca36df 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java
@@ -45,7 +45,7 @@ import org.apache.hadoop.util.StringUtils;
 
 /**
  * Mapper class that executes the DistCp copy operation.
- * Implements the o.a.h.mapreduce.Mapper<> interface.
+ * Implements the o.a.h.mapreduce.Mapper interface.
  */
 public class CopyMapper extends Mapper<Text, CopyListingFileStatus, Text, Text> {
 
@@ -182,10 +182,11 @@ public class CopyMapper extends Mapper<Text, CopyListingFileStatus, Text, Text>
   }
 
   /**
-   * Implementation of the Mapper<>::map(). Does the copy.
+   * Implementation of the Mapper::map(). Does the copy.
    * @param relPath The target path.
    * @param sourceFileStatus The source path.
    * @throws IOException
+   * @throws InterruptedException
    */
   @Override
   public void map(Text relPath, CopyListingFileStatus sourceFileStatus,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyOutputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyOutputFormat.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyOutputFormat.java
index eb43aa3..a5bd605 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyOutputFormat.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyOutputFormat.java
@@ -97,13 +97,13 @@ public class CopyOutputFormat<K, V> extends TextOutputFormat<K, V> {
     }
   }
 
-  /** @inheritDoc */
+  /** {@inheritDoc} */
   @Override
   public OutputCommitter getOutputCommitter(TaskAttemptContext context) throws IOException {
     return new CopyCommitter(getOutputPath(context), context);
   }
 
-  /** @inheritDoc */
+  /** {@inheritDoc} */
   @Override
   public void checkOutputSpecs(JobContext context) throws IOException {
     Configuration conf = context.getConfiguration();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
index 1d61156..65d644b 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
@@ -84,8 +84,7 @@ public class RetriableFileCopyCommand extends RetriableCommand {
    * This is the actual copy-implementation.
    * @param arguments Argument-list to the command.
    * @return Number of bytes copied.
-   * @throws Exception: CopyReadException, if there are read-failures. All other
-   *         failures are IOExceptions.
+   * @throws Exception
    */
   @SuppressWarnings("unchecked")
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/UniformSizeInputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/UniformSizeInputFormat.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/UniformSizeInputFormat.java
index 4add0bb..8dc7a65 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/UniformSizeInputFormat.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/UniformSizeInputFormat.java
@@ -38,7 +38,7 @@ import java.util.List;
 import java.util.ArrayList;
 
 /**
- * UniformSizeInputFormat extends the InputFormat<> class, to produce
+ * UniformSizeInputFormat extends the InputFormat class, to produce
  * input-splits for DistCp.
  * It looks at the copy-listing and groups the contents into input-splits such
  * that the total-number of bytes to be copied for each input split is
@@ -55,7 +55,7 @@ public class UniformSizeInputFormat
    * approximately equal.
    * @param context JobContext for the job.
    * @return The list of uniformly-distributed input-splits.
-   * @throws IOException: On failure.
+   * @throws IOException
    * @throws InterruptedException
    */
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputFormat.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputFormat.java
index f5303d5..38269c7 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputFormat.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputFormat.java
@@ -64,7 +64,7 @@ public class DynamicInputFormat<K, V> extends InputFormat<K, V> {
    * tasks.
    * @param jobContext JobContext for the map job.
    * @return The list of (empty) dynamic input-splits.
-   * @throws IOException, on failure.
+   * @throws IOException
    * @throws InterruptedException
    */
   @Override
@@ -343,7 +343,7 @@ public class DynamicInputFormat<K, V> extends InputFormat<K, V> {
    * @param inputSplit The split for which the RecordReader is required.
    * @param taskAttemptContext TaskAttemptContext for the current attempt.
    * @return DynamicRecordReader instance.
-   * @throws IOException, on failure.
+   * @throws IOException
    * @throws InterruptedException
    */
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicRecordReader.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicRecordReader.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicRecordReader.java
index 40d75f4..00b3c69 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicRecordReader.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicRecordReader.java
@@ -57,7 +57,7 @@ public class DynamicRecordReader<K, V> extends RecordReader<K, V> {
    * RecordReader to read from chunks.
    * @param inputSplit The InputSplit for the map. Ignored entirely.
    * @param taskAttemptContext The AttemptContext.
-   * @throws IOException, on failure.
+   * @throws IOException
    * @throws InterruptedException
    */
   @Override
@@ -88,7 +88,7 @@ public class DynamicRecordReader<K, V> extends RecordReader<K, V> {
    * been completely exhausted, a new chunk is acquired and read,
    * transparently.
    * @return True, if the nextValue() could be traversed to. False, otherwise.
-   * @throws IOException, on failure.
+   * @throws IOException
    * @throws InterruptedException
    */
   @Override
@@ -130,7 +130,7 @@ public class DynamicRecordReader<K, V> extends RecordReader<K, V> {
   /**
    * Implementation of RecordReader::getCurrentKey().
    * @return The key of the current record. (i.e. the source-path.)
-   * @throws IOException, on failure.
+   * @throws IOException
    * @throws InterruptedException
    */
   @Override
@@ -142,7 +142,7 @@ public class DynamicRecordReader<K, V> extends RecordReader<K, V> {
   /**
    * Implementation of RecordReader::getCurrentValue().
    * @return The value of the current record. (i.e. the target-path.)
-   * @throws IOException, on failure.
+   * @throws IOException
    * @throws InterruptedException
    */
   @Override
@@ -154,7 +154,7 @@ public class DynamicRecordReader<K, V> extends RecordReader<K, V> {
   /**
    * Implementation of RecordReader::getProgress().
    * @return A fraction [0.0,1.0] indicating the progress of a DistCp mapper.
-   * @throws IOException, on failure.
+   * @throws IOException
    * @throws InterruptedException
    */
   @Override
@@ -192,7 +192,7 @@ public class DynamicRecordReader<K, V> extends RecordReader<K, V> {
   /**
    * Implementation of RecordReader::close().
    * Closes the RecordReader.
-   * @throws IOException, on failure.
+   * @throws IOException
    */
   @Override
   public void close()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
index ca7566b..20fdf11 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
@@ -65,7 +65,7 @@ public class DistCpUtils {
    * @param path The path of the file whose size is sought.
    * @param configuration Configuration, to retrieve the appropriate FileSystem.
    * @return The file-size, in number of bytes.
-   * @throws IOException, on failure.
+   * @throws IOException
    */
   public static long getFileSize(Path path, Configuration configuration)
                                             throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/RetriableCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/RetriableCommand.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/RetriableCommand.java
index 563372e..c27b2e1 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/RetriableCommand.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/RetriableCommand.java
@@ -77,7 +77,7 @@ public abstract class RetriableCommand {
    *  2. the command may no longer be retried (e.g. runs out of retry-attempts).
    * @param arguments The list of arguments for the command.
    * @return Generic "Object" from doExecute(), on success.
-   * @throws IOException, IOException, on complete failure.
+   * @throws Exception
    */
   public Object execute(Object... arguments) throws Exception {
     Exception latestException;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ThrottledInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ThrottledInputStream.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ThrottledInputStream.java
index d08a301..9e435d9 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ThrottledInputStream.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ThrottledInputStream.java
@@ -62,7 +62,7 @@ public class ThrottledInputStream extends InputStream {
     rawStream.close();
   }
 
-  /** @inheritDoc */
+  /** {@inheritDoc} */
   @Override
   public int read() throws IOException {
     throttle();
@@ -73,7 +73,7 @@ public class ThrottledInputStream extends InputStream {
     return data;
   }
 
-  /** @inheritDoc */
+  /** {@inheritDoc} */
   @Override
   public int read(byte[] b) throws IOException {
     throttle();
@@ -84,7 +84,7 @@ public class ThrottledInputStream extends InputStream {
     return readLen;
   }
 
-  /** @inheritDoc */
+  /** {@inheritDoc} */
   @Override
   public int read(byte[] b, int off, int len) throws IOException {
     throttle();
@@ -155,7 +155,7 @@ public class ThrottledInputStream extends InputStream {
     return totalSleepTime;
   }
 
-  /** @inheritDoc */
+  /** {@inheritDoc} */
   @Override
   public String toString() {
     return "ThrottledInputStream{" +

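For context on the class being touched: a throttled stream caps the observed byte rate by sleeping inside each read until the running average falls back under the limit. The following is a minimal sketch of that idea with invented names; it is not DistCp's actual implementation:

    import java.io.FilterInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.io.InterruptedIOException;

    class SimpleThrottledStream extends FilterInputStream {
      private final long maxBytesPerSec;
      private final long startMillis = System.currentTimeMillis();
      private long bytesRead = 0;

      SimpleThrottledStream(InputStream in, long maxBytesPerSec) {
        super(in);
        this.maxBytesPerSec = maxBytesPerSec;
      }

      @Override
      public int read() throws IOException {
        throttle();
        int b = super.read();
        if (b >= 0) {
          bytesRead++;
        }
        return b;
      }

      // Sleep while the observed average rate exceeds the configured cap.
      private void throttle() throws IOException {
        while (maxBytesPerSec > 0
            && bytesRead / elapsedSeconds() > maxBytesPerSec) {
          try {
            Thread.sleep(50);
          } catch (InterruptedException e) {
            throw new InterruptedIOException("interrupted while throttling");
          }
        }
      }

      private double elapsedSeconds() {
        return Math.max(1, System.currentTimeMillis() - startMillis) / 1000.0;
      }
    }
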
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/Logalyzer.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/Logalyzer.java b/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/Logalyzer.java
index 050bfbe..449ecbf 100644
--- a/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/Logalyzer.java
+++ b/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/Logalyzer.java
@@ -60,7 +60,9 @@ import org.apache.hadoop.mapreduce.lib.map.RegexMapper;
  *  b) Directory on dfs to archive the logs. 
  *  c) The sort/grep patterns for analyzing the files and separator for boundaries.
  * Usage: 
- * Logalyzer -archive -archiveDir <directory to archive logs> -analysis <directory> -logs <log-list uri> -grep <pattern> -sort <col1, col2> -separator <separator>   
+ * Logalyzer -archive -archiveDir &lt;directory to archive logs&gt; -analysis
+ * &lt;directory&gt; -logs &lt;log-list uri&gt; -grep &lt;pattern&gt; -sort
+ * &lt;col1, col2&gt; -separator &lt;separator&gt;
  * <p>
  */
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/ResourceUsageEmulatorPlugin.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/ResourceUsageEmulatorPlugin.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/ResourceUsageEmulatorPlugin.java
index 593c1a4..7a80e8d 100644
--- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/ResourceUsageEmulatorPlugin.java
+++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/ResourceUsageEmulatorPlugin.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.conf.Configuration;
  * {@link ResourceUsageEmulatorPlugin} is also configured with a feedback module
  * i.e a {@link ResourceCalculatorPlugin}, to monitor the current resource 
  * usage. {@link ResourceUsageMetrics} decides the final resource usage value to
- * emulate. {@link Progressive} keeps track of the task's progress.</p>
+ * emulate. {@link Progressive} keeps track of the task's progress.
  * 
  * <br><br>
  * 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/http/RestClientBindings.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/http/RestClientBindings.java b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/http/RestClientBindings.java
index 25a7e93..d11c369 100644
--- a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/http/RestClientBindings.java
+++ b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/http/RestClientBindings.java
@@ -31,10 +31,10 @@ import static org.apache.hadoop.fs.swift.http.SwiftProtocolConstants.*;
 /**
  * This class implements the binding logic between Hadoop configurations
  * and the swift rest client.
- * <p/>
+ * <p>
  * The swift rest client takes a Properties instance containing
  * the string values it uses to bind to a swift endpoint.
- * <p/>
+ * <p>
  * This class extracts the values for a specific filesystem endpoint
  * and then builds an appropriate Properties file.
  */
@@ -188,7 +188,7 @@ public final class RestClientBindings {
 
   /**
    * Copy a (trimmed) property from the configuration file to the properties file.
-   * <p/>
+   * <p>
    * If marked as required and not found in the configuration, an
    * exception is raised.
    * If not required -and missing- then the property will not be set.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/http/SwiftRestClient.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/http/SwiftRestClient.java b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/http/SwiftRestClient.java
index 28f8b47..55dad11 100644
--- a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/http/SwiftRestClient.java
+++ b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/http/SwiftRestClient.java
@@ -1061,10 +1061,9 @@ public final class SwiftRestClient {
    * Authenticate to Openstack Keystone
    * As well as returning the access token, the member fields {@link #token},
    * {@link #endpointURI} and {@link #objectLocationURI} are set up for re-use.
-   * <p/>
+   * <p>
    * This method is re-entrant -if more than one thread attempts to authenticate
    * neither will block -but the field values will have those of the last caller.
-   * <p/>
    *
    * @return authenticated access token
    */
@@ -1575,6 +1574,7 @@ public final class SwiftRestClient {
    * @param path path to object
    * @param endpointURI domain URL e.g. http://domain.com
    * @return valid URI for object
+   * @throws SwiftException
    */
   public static URI pathToURI(SwiftObjectPath path,
                               URI endpointURI) throws SwiftException {
@@ -1820,7 +1820,7 @@ public final class SwiftRestClient {
 
   /**
    * Get the blocksize of this filesystem
-   * @return a blocksize >0
+   * @return a blocksize &gt; 0
    */
   public long getBlocksizeKB() {
     return blocksizeKB;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystem.java b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystem.java
index b70f7ef..27a572f 100644
--- a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystem.java
+++ b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystem.java
@@ -225,10 +225,10 @@ public class SwiftNativeFileSystem extends FileSystem {
    * Return an array containing hostnames, offset and size of
    * portions of the given file.  For a nonexistent
    * file or regions, null will be returned.
-   * <p/>
+   * <p>
    * This call is most helpful with DFS, where it returns
    * hostnames of machines that contain the given file.
-   * <p/>
+   * <p>
    * The FileSystem will simply return an elt containing 'localhost'.
    */
   @Override
@@ -645,7 +645,7 @@ public class SwiftNativeFileSystem extends FileSystem {
   /**
    * Low level method to do a deep listing of all entries, not stopping
    * at the next directory entry. This is to let tests be confident that
-   * recursive deletes &c really are working.
+   * recursive deletes really are working.
    * @param path path to recurse down
    * @param newest ask for the newest data, potentially slower than not.
    * @return a potentially empty array of file status

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java
index 0138eae..6d812a0 100644
--- a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java
@@ -518,7 +518,7 @@ public class SwiftNativeFileSystemStore {
    * Rename through copy-and-delete. this is a consequence of the
    * Swift filesystem using the path as the hash
    * into the Distributed Hash Table, "the ring" of filenames.
-   * <p/>
+   * <p>
    * Because of the nature of the operation, it is not atomic.
    *
    * @param src source file/dir
@@ -847,7 +847,7 @@ public class SwiftNativeFileSystemStore {
   }
 
   /**
-   * Insert a throttled wait if the throttle delay >0
+   * Insert a throttled wait if the throttle delay &gt; 0
    * @throws InterruptedIOException if interrupted during sleep
    */
   public void throttle() throws InterruptedIOException {
@@ -878,7 +878,7 @@ public class SwiftNativeFileSystemStore {
    * raised. This lets the caller distinguish a file not found with
    * other reasons for failure, so handles race conditions in recursive
    * directory deletes better.
-   * <p/>
+   * <p>
    * The problem being addressed is: caller A requests a recursive delete
    * of directory /dir ; caller B requests a delete of a file /dir/file,
    * between caller A enumerating the files contents, and requesting a delete

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftTestUtils.java b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftTestUtils.java
index c9e26ac..01ec739 100644
--- a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftTestUtils.java
+++ b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftTestUtils.java
@@ -236,7 +236,7 @@ public class SwiftTestUtils extends org.junit.Assert {
 
   /**
    * Convert a byte to a character for printing. If the
-   * byte value is < 32 -and hence unprintable- the byte is
+   * byte value is &lt; 32 -and hence unprintable- the byte is
    * returned as a two digit hex value
    * @param b byte
    * @return the printable character string

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/InputDemuxer.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/InputDemuxer.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/InputDemuxer.java
index cd99e1c..0927a77 100644
--- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/InputDemuxer.java
+++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/InputDemuxer.java
@@ -45,12 +45,12 @@ public interface InputDemuxer extends Closeable {
   public void bindTo(Path path, Configuration conf) throws IOException;
 
   /**
-   * Get the next <name, input> pair. The name should preserve the original job
+   * Get the next &lt;name, input&gt; pair. The name should preserve the original job
    * history file or job conf file name. The input object should be closed
    * before calling getNext() again. The old input object would be invalid after
    * calling getNext() again.
    * 
-   * @return the next <name, input> pair.
+   * @return the next &lt;name, input&gt; pair.
    */
   public Pair<String, InputStream> getNext() throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java
index c2537be..7547eca 100644
--- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java
+++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java
@@ -67,8 +67,9 @@ import org.apache.log4j.Logger;
  * ignoring user-specific and hard-to-parse keys but also provides a consistent
  * view for all possible inputs. So if users invoke the 
  * {@link #parseJobProperty(String, String)} API with either
- * <"mapreduce.job.user.name", "bob"> or <"user.name", "bob">, then the result 
- * would be a {@link UserName} {@link DataType} wrapping the user-name "bob".
+ * &lt;"mapreduce.job.user.name", "bob"&gt; or &lt;"user.name", "bob"&gt;,
+ * then the result would be a {@link UserName} {@link DataType} wrapping
+ * the user-name "bob".
  */
 @SuppressWarnings("deprecation")
 public class MapReduceJobPropertiesParser implements JobPropertyParser {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef07f/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/package-info.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/package-info.java
index b88b37e..2253225 100644
--- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/package-info.java
+++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/package-info.java
@@ -41,7 +41,7 @@
  *        String conf_filename = .. // assume the job configuration filename here
  *        
  *        // construct a list of interesting properties
- *        List<String> interestedProperties = new ArrayList<String>();
+ *        List&lt;String&gt; interestedProperties = new ArrayList&lt;String&gt;();
  *        interestedProperties.add("mapreduce.job.name");
  *        
  *        JobConfigurationParser jcp = 
@@ -154,7 +154,7 @@
  *        TopologyBuilder tb = new TopologyBuilder();
  *        
  *        // construct a list of interesting properties
- *        List<String> interestingProperties = new ArrayList<Strng>();
+ *        List&lt;String&gt; interestingProperties = new ArrayList&lt;String&gt;();
  *        // add the interesting properties here
  *        interestingProperties.add("mapreduce.job.name");
  *        
@@ -207,7 +207,7 @@
  *        JobBuilder jb = new JobBuilder(jobID);
  *        
  *        // construct a list of interesting properties
- *        List<String> interestingProperties = new ArrayList<Strng>();
+ *        List&lt;String&gt; interestingProperties = new ArrayList&lt;String&gt;();
  *        // add the interesting properties here
  *        interestingProperties.add("mapreduce.job.name");
  *        
@@ -269,7 +269,7 @@
  *        TopologyBuilder tb = new TopologyBuilder();
  *        
  *        // construct a list of interesting properties
- *        List<String> interestingProperties = new ArrayList<Strng>();
+ *        List&lt;String&gt; interestingProperties = new ArrayList&lt;String&gt;();
  *        // add the interesting properties here
  *        interestingProperties.add("mapreduce.job.name");
  *        

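Both spellings below produce "List<String>" in the generated HTML; the entity form is what these hunks use, while the inline-code form avoids hand escaping. A tiny illustration (class name invented):

    /**
     * Entity form: List&lt;String&gt; props = new ArrayList&lt;String&gt;();
     * Inline form: {@code List<String> props = new ArrayList<String>();}
     */
    public final class GenericsJavadocExample {
      private GenericsJavadocExample() { }
    }
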

[24/50] [abbrv] hadoop git commit: YARN-1809. Synchronize RM and TimeLineServer Web-UIs. Contributed by Zhijie Shen and Xuan Gong

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
index 7bac6f2..2cd7580 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
@@ -20,15 +20,16 @@ package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
 
 import static org.apache.hadoop.yarn.webapp.Params.TITLE;
 import static org.mockito.Mockito.mock;
-import org.junit.Assert;
 
+import org.junit.Assert;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.api.ApplicationContext;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryClientService;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryManager;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryManagerImpl;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryStore;
@@ -68,8 +69,8 @@ public class TestAHSWebApp extends ApplicationHistoryStoreTestUtils {
   @Test
   public void testView() throws Exception {
     Injector injector =
-        WebAppTests.createMockInjector(ApplicationContext.class,
-          mockApplicationHistoryManager(5, 1, 1));
+        WebAppTests.createMockInjector(ApplicationBaseProtocol.class,
+          mockApplicationHistoryClientService(5, 1, 1));
     AHSView ahsViewInstance = injector.getInstance(AHSView.class);
 
     ahsViewInstance.render();
@@ -89,8 +90,8 @@ public class TestAHSWebApp extends ApplicationHistoryStoreTestUtils {
   @Test
   public void testAppPage() throws Exception {
     Injector injector =
-        WebAppTests.createMockInjector(ApplicationContext.class,
-          mockApplicationHistoryManager(1, 5, 1));
+        WebAppTests.createMockInjector(ApplicationBaseProtocol.class,
+          mockApplicationHistoryClientService(1, 5, 1));
     AppPage appPageInstance = injector.getInstance(AppPage.class);
 
     appPageInstance.render();
@@ -105,8 +106,8 @@ public class TestAHSWebApp extends ApplicationHistoryStoreTestUtils {
   @Test
   public void testAppAttemptPage() throws Exception {
     Injector injector =
-        WebAppTests.createMockInjector(ApplicationContext.class,
-          mockApplicationHistoryManager(1, 1, 5));
+        WebAppTests.createMockInjector(ApplicationBaseProtocol.class,
+          mockApplicationHistoryClientService(1, 1, 5));
     AppAttemptPage appAttemptPageInstance =
         injector.getInstance(AppAttemptPage.class);
 
@@ -123,8 +124,8 @@ public class TestAHSWebApp extends ApplicationHistoryStoreTestUtils {
   @Test
   public void testContainerPage() throws Exception {
     Injector injector =
-        WebAppTests.createMockInjector(ApplicationContext.class,
-          mockApplicationHistoryManager(1, 1, 1));
+        WebAppTests.createMockInjector(ApplicationBaseProtocol.class,
+          mockApplicationHistoryClientService(1, 1, 1));
     ContainerPage containerPageInstance =
         injector.getInstance(ContainerPage.class);
 
@@ -141,10 +142,12 @@ public class TestAHSWebApp extends ApplicationHistoryStoreTestUtils {
     WebAppTests.flushOutput(injector);
   }
 
-  ApplicationHistoryManager mockApplicationHistoryManager(int numApps,
+  ApplicationHistoryClientService mockApplicationHistoryClientService(int numApps,
       int numAppAttempts, int numContainers) throws Exception {
     ApplicationHistoryManager ahManager =
         new MockApplicationHistoryManagerImpl(store);
+    ApplicationHistoryClientService historyClientService =
+        new ApplicationHistoryClientService(ahManager);
     for (int i = 1; i <= numApps; ++i) {
       ApplicationId appId = ApplicationId.newInstance(0, i);
       writeApplicationStartData(appId);
@@ -161,7 +164,7 @@ public class TestAHSWebApp extends ApplicationHistoryStoreTestUtils {
       }
       writeApplicationFinishData(appId);
     }
-    return ahManager;
+    return historyClientService;
   }
 
   class MockApplicationHistoryManagerImpl extends ApplicationHistoryManagerImpl {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java
index 41dda91..913b80d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java
@@ -28,13 +28,11 @@ import java.util.Properties;
 import javax.servlet.FilterConfig;
 import javax.servlet.ServletException;
 import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.http.lib.StaticUserWebFilter.StaticUserFilter;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
-import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
 import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
+import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -44,7 +42,7 @@ import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.api.ApplicationContext;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryClientService;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryManagerOnTimelineStore;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.TestApplicationHistoryManagerOnTimelineStore;
 import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
@@ -79,7 +77,7 @@ import com.sun.jersey.test.framework.WebAppDescriptor;
 @RunWith(Parameterized.class)
 public class TestAHSWebServices extends JerseyTestBase {
 
-  private static ApplicationHistoryManagerOnTimelineStore historyManager;
+  private static ApplicationHistoryClientService historyClientService;
   private static final String[] USERS = new String[] { "foo" , "bar" };
 
   @BeforeClass
@@ -93,16 +91,23 @@ public class TestAHSWebServices extends JerseyTestBase {
     conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
     conf.set(YarnConfiguration.YARN_ADMIN_ACL, "foo");
     ApplicationACLsManager appAclsManager = new ApplicationACLsManager(conf);
-    historyManager =
+    ApplicationHistoryManagerOnTimelineStore historyManager =
         new ApplicationHistoryManagerOnTimelineStore(dataManager, appAclsManager);
     historyManager.init(conf);
-    historyManager.start();
+    historyClientService = new ApplicationHistoryClientService(historyManager) {
+      @Override
+      protected void serviceStart() throws Exception {
+        // Do Nothing
+      }
+    };
+    historyClientService.init(conf);
+    historyClientService.start();
   }
 
   @AfterClass
   public static void tearDownClass() throws Exception {
-    if (historyManager != null) {
-      historyManager.stop();
+    if (historyClientService != null) {
+      historyClientService.stop();
     }
   }
 
@@ -118,7 +123,7 @@ public class TestAHSWebServices extends JerseyTestBase {
       bind(JAXBContextResolver.class);
       bind(AHSWebServices.class);
       bind(GenericExceptionHandler.class);
-      bind(ApplicationContext.class).toInstance(historyManager);
+      bind(ApplicationBaseProtocol.class).toInstance(historyClientService);
       serve("/*").with(GuiceContainer.class);
       filter("/*").through(TestSimpleAuthFilter.class);
     }
@@ -372,5 +377,4 @@ public class TestAHSWebServices extends JerseyTestBase {
     assertEquals(ContainerState.COMPLETE.toString(),
       container.getString("containerState"));
   }
-
 }

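The test module above swaps an interface binding for a prebuilt object via Guice's toInstance. A minimal runnable sketch of that mechanism, with an invented service interface standing in for ApplicationBaseProtocol:

    import com.google.inject.AbstractModule;
    import com.google.inject.Guice;
    import com.google.inject.Injector;

    public class BindToInstanceSketch {
      // Hypothetical service type, named for illustration only.
      interface ReportService {
        String report();
      }

      public static void main(String[] args) {
        final ReportService stub = () -> "stub report";
        Injector injector = Guice.createInjector(new AbstractModule() {
          @Override
          protected void configure() {
            // Every injection of ReportService now receives the stub,
            // mirroring bind(ApplicationBaseProtocol.class).toInstance(...).
            bind(ReportService.class).toInstance(stub);
          }
        });
        System.out.println(injector.getInstance(ReportService.class).report());
      }
    }
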
http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ApplicationContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ApplicationContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ApplicationContext.java
deleted file mode 100644
index 0e2ffdf..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ApplicationContext.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.api;
-
-import java.io.IOException;
-import java.util.Map;
-
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ApplicationReport;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerReport;
-import org.apache.hadoop.yarn.exceptions.YarnException;
-
-public interface ApplicationContext {
-  /**
-   * This method returns Application {@link ApplicationReport} for the specified
-   * {@link ApplicationId}.
-   * 
-   * @param appId
-   * 
-   * @return {@link ApplicationReport} for the ApplicationId.
-   * @throws YarnException
-   * @throws IOException
-   */
-  ApplicationReport getApplication(ApplicationId appId)
-      throws YarnException, IOException;
-
-  /**
-   * This method returns all Application {@link ApplicationReport}s
-   * 
-   * @return map of {@link ApplicationId} to {@link ApplicationReport}s.
-   * @throws YarnException
-   * @throws IOException
-   */
-  Map<ApplicationId, ApplicationReport> getAllApplications()
-      throws YarnException, IOException;
-
-  /**
-   * Application can have multiple application attempts
-   * {@link ApplicationAttemptReport}. This method returns the all
-   * {@link ApplicationAttemptReport}s for the Application.
-   * 
-   * @param appId
-   * 
-   * @return all {@link ApplicationAttemptReport}s for the Application.
-   * @throws YarnException
-   * @throws IOException
-   */
-  Map<ApplicationAttemptId, ApplicationAttemptReport> getApplicationAttempts(
-      ApplicationId appId) throws YarnException, IOException;
-
-  /**
-   * This method returns {@link ApplicationAttemptReport} for specified
-   * {@link ApplicationId}.
-   * 
-   * @param appAttemptId
-   *          {@link ApplicationAttemptId}
-   * @return {@link ApplicationAttemptReport} for ApplicationAttemptId
-   * @throws YarnException
-   * @throws IOException
-   */
-  ApplicationAttemptReport getApplicationAttempt(
-      ApplicationAttemptId appAttemptId) throws YarnException, IOException;
-
-  /**
-   * This method returns {@link ContainerReport} for specified
-   * {@link ContainerId}.
-   * 
-   * @param containerId
-   *          {@link ContainerId}
-   * @return {@link ContainerReport} for ContainerId
-   * @throws YarnException
-   * @throws IOException
-   */
-  ContainerReport getContainer(ContainerId containerId)
-      throws YarnException, IOException;
-
-  /**
-   * This method returns {@link ContainerReport} for specified
-   * {@link ApplicationAttemptId}.
-   * 
-   * @param appAttemptId
-   *          {@link ApplicationAttemptId}
-   * @return {@link ContainerReport} for ApplicationAttemptId
-   * @throws YarnException
-   * @throws IOException
-   */
-  ContainerReport getAMContainer(ApplicationAttemptId appAttemptId)
-      throws YarnException, IOException;
-
-  /**
-   * This method returns Map of {@link ContainerId} to {@link ContainerReport}
-   * for specified {@link ApplicationAttemptId}.
-   * 
-   * @param appAttemptId
-   *          {@link ApplicationAttemptId}
-   * @return Map of {@link ContainerId} to {@link ContainerReport} for
-   *         ApplicationAttemptId
-   * @throws YarnException
-   * @throws IOException
-   */
-  Map<ContainerId, ContainerReport> getContainers(
-      ApplicationAttemptId appAttemptId) throws YarnException, IOException;
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
index 4a02892..ea33f4f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
@@ -27,10 +27,14 @@ import org.apache.commons.lang.StringEscapeUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
+import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerReport;
-import org.apache.hadoop.yarn.server.api.ApplicationContext;
+import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
 import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo;
 import org.apache.hadoop.yarn.util.ConverterUtils;
@@ -45,11 +49,12 @@ import com.google.inject.Inject;
 public class AppAttemptBlock extends HtmlBlock {
 
   private static final Log LOG = LogFactory.getLog(AppAttemptBlock.class);
-  private final ApplicationContext appContext;
+  protected ApplicationBaseProtocol appBaseProt;
 
   @Inject
-  public AppAttemptBlock(ApplicationContext appContext) {
-    this.appContext = appContext;
+  public AppAttemptBlock(ApplicationBaseProtocol appBaseProt, ViewContext ctx) {
+    super(ctx);
+    this.appBaseProt = appBaseProt;
   }
 
   @Override
@@ -68,18 +73,22 @@ public class AppAttemptBlock extends HtmlBlock {
       return;
     }
 
-    final ApplicationAttemptId appAttemptIdFinal = appAttemptId;
     UserGroupInformation callerUGI = getCallerUGI();
-    ApplicationAttemptReport appAttemptReport;
+    ApplicationAttemptReport appAttemptReport = null;
     try {
+      final GetApplicationAttemptReportRequest request =
+          GetApplicationAttemptReportRequest.newInstance(appAttemptId);
       if (callerUGI == null) {
-        appAttemptReport = appContext.getApplicationAttempt(appAttemptId);
+        appAttemptReport =
+            appBaseProt.getApplicationAttemptReport(request)
+              .getApplicationAttemptReport();
       } else {
         appAttemptReport = callerUGI.doAs(
             new PrivilegedExceptionAction<ApplicationAttemptReport> () {
           @Override
           public ApplicationAttemptReport run() throws Exception {
-            return appContext.getApplicationAttempt(appAttemptIdFinal);
+            return appBaseProt.getApplicationAttemptReport(request)
+                .getApplicationAttemptReport();
           }
         });
       }
@@ -90,10 +99,35 @@ public class AppAttemptBlock extends HtmlBlock {
       html.p()._(message)._();
       return;
     }
+
     if (appAttemptReport == null) {
       puts("Application Attempt not found: " + attemptid);
       return;
     }
+
+    boolean exceptionWhenGetContainerReports = false;
+    Collection<ContainerReport> containers = null;
+    try {
+      final GetContainersRequest request =
+          GetContainersRequest.newInstance(appAttemptId);
+      if (callerUGI == null) {
+        containers = appBaseProt.getContainers(request).getContainerList();
+      } else {
+        containers = callerUGI.doAs(
+            new PrivilegedExceptionAction<Collection<ContainerReport>> () {
+          @Override
+          public Collection<ContainerReport> run() throws Exception {
+            return  appBaseProt.getContainers(request).getContainerList();
+          }
+        });
+      }
+    } catch (RuntimeException e) {
+      // this block exists only to suppress the findbugs warning
+      exceptionWhenGetContainerReports = true;
+    } catch (Exception e) {
+      exceptionWhenGetContainerReports = true;
+    }
+
     AppAttemptInfo appAttempt = new AppAttemptInfo(appAttemptReport);
 
     setTitle(join("Application Attempt ", attemptid));
@@ -104,43 +138,35 @@ public class AppAttemptBlock extends HtmlBlock {
       node = appAttempt.getHost() + ":" + appAttempt.getRpcPort();
     }
     info("Application Attempt Overview")
-      ._("State", appAttempt.getAppAttemptState())
       ._(
-        "Master Container",
-        appAttempt.getAmContainerId() == null ? "#" : root_url("container",
-          appAttempt.getAmContainerId()),
+        "Application Attempt State:",
+        appAttempt.getAppAttemptState() == null ? UNAVAILABLE : appAttempt
+          .getAppAttemptState())
+      ._(
+        "AM Container:",
+        appAttempt.getAmContainerId() == null || containers == null
+            || !hasAMContainer(appAttemptReport.getAMContainerId(), containers)
+            ? null : root_url("container", appAttempt.getAmContainerId()),
         String.valueOf(appAttempt.getAmContainerId()))
       ._("Node:", node)
       ._(
         "Tracking URL:",
-        appAttempt.getTrackingUrl() == null ? "#" : root_url(appAttempt
-          .getTrackingUrl()), "History")
-      ._("Diagnostics Info:", appAttempt.getDiagnosticsInfo());
+        appAttempt.getTrackingUrl() == null
+            || appAttempt.getTrackingUrl() == UNAVAILABLE ? null
+            : root_url(appAttempt.getTrackingUrl()),
+        appAttempt.getTrackingUrl() == null
+            || appAttempt.getTrackingUrl() == UNAVAILABLE
+            ? "Unassigned"
+            : appAttempt.getAppAttemptState() == YarnApplicationAttemptState.FINISHED
+                || appAttempt.getAppAttemptState() == YarnApplicationAttemptState.FAILED
+                || appAttempt.getAppAttemptState() == YarnApplicationAttemptState.KILLED
+                ? "History" : "ApplicationMaster")
+      ._("Diagnostics Info:", appAttempt.getDiagnosticsInfo() == null ?
+          "" : appAttempt.getDiagnosticsInfo());
 
     html._(InfoBlock.class);
 
-    Collection<ContainerReport> containers;
-    try {
-      if (callerUGI == null) {
-        containers = appContext.getContainers(appAttemptId).values();
-      } else {
-        containers = callerUGI.doAs(
-            new PrivilegedExceptionAction<Collection<ContainerReport>> () {
-          @Override
-          public Collection<ContainerReport> run() throws Exception {
-            return  appContext.getContainers(appAttemptIdFinal).values();
-          }
-        });
-      }
-    } catch (RuntimeException e) {
-      // have this block to suppress the findbugs warning
-      html
-      .p()
-      ._(
-        "Sorry, Failed to get containers for application attempt" + attemptid
-            + ".")._();
-      return;
-    } catch (Exception e) {
+    if (exceptionWhenGetContainerReports) {
       html
         .p()
         ._(
@@ -166,11 +192,12 @@ public class AppAttemptBlock extends HtmlBlock {
         .append("'>")
         .append(container.getContainerId())
         .append("</a>\",\"<a href='")
-        .append(container.getAssignedNodeId())
+        .append("#") // TODO: replace with node http address (YARN-1884)
         .append("'>")
-        .append(
-          StringEscapeUtils.escapeJavaScript(StringEscapeUtils
-            .escapeHtml(container.getAssignedNodeId()))).append("</a>\",\"")
+        .append(container.getAssignedNodeId() == null ? "N/A" :
+            StringEscapeUtils.escapeJavaScript(StringEscapeUtils
+                .escapeHtml(container.getAssignedNodeId())))
+        .append("</a>\",\"")
         .append(container.getContainerExitStatus()).append("\",\"<a href='")
         .append(container.getLogUrl() == null ?
             "#" : container.getLogUrl()).append("'>")
@@ -187,4 +214,14 @@ public class AppAttemptBlock extends HtmlBlock {
 
     tbody._()._();
   }
+
+  private boolean hasAMContainer(ContainerId containerId,
+      Collection<ContainerReport> containers) {
+    for (ContainerReport container : containers) {
+      if (containerId.equals(container.getContainerId())) {
+        return true;
+      }
+    }
+    return false;
+  }
 }
\ No newline at end of file

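The pattern above -- build a protocol request object, then either call
ApplicationBaseProtocol directly or wrap the call in a
UserGroupInformation.doAs() when a remote caller is present -- recurs
throughout this change. A minimal standalone sketch of that pattern (the
protocol instance and attempt id are assumed to be obtained elsewhere; this
is an illustration, not part of the patch):

    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
    import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
    import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
    import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;

    public class AttemptReportFetcher {
      // Fetch one attempt report, impersonating the remote user when present.
      public static ApplicationAttemptReport fetch(
          final ApplicationBaseProtocol proto, ApplicationAttemptId attemptId,
          UserGroupInformation callerUGI) throws Exception {
        final GetApplicationAttemptReportRequest request =
            GetApplicationAttemptReportRequest.newInstance(attemptId);
        if (callerUGI == null) {
          return proto.getApplicationAttemptReport(request)
              .getApplicationAttemptReport();
        }
        return callerUGI.doAs(
            new PrivilegedExceptionAction<ApplicationAttemptReport>() {
              @Override
              public ApplicationAttemptReport run() throws Exception {
                return proto.getApplicationAttemptReport(request)
                    .getApplicationAttemptReport();
              }
            });
      }
    }
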
http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
index 8fa4086..2db88ae 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
@@ -20,24 +20,47 @@ package org.apache.hadoop.yarn.server.webapp;
 
 import static org.apache.hadoop.yarn.util.StringHelper.join;
 import static org.apache.hadoop.yarn.webapp.YarnWebParams.APPLICATION_ID;
+import static org.apache.hadoop.yarn.webapp.YarnWebParams.WEB_UI_TYPE;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._EVEN;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._INFO_WRAP;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._ODD;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._TH;
 
 import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
+import java.util.List;
 
 import org.apache.commons.lang.StringEscapeUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
+import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerReport;
-import org.apache.hadoop.yarn.server.api.ApplicationContext;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException;
 import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.AppInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo;
 import org.apache.hadoop.yarn.util.Apps;
 import org.apache.hadoop.yarn.util.Times;
+import org.apache.hadoop.yarn.util.resource.Resources;
+import org.apache.hadoop.yarn.webapp.YarnWebParams;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
@@ -47,16 +70,20 @@ import com.google.inject.Inject;
 
 public class AppBlock extends HtmlBlock {
 
-  protected ApplicationContext appContext;
+  private static final Log LOG = LogFactory.getLog(AppBlock.class);
+  protected ApplicationBaseProtocol appBaseProt;
+  protected Configuration conf;
 
   @Inject
-  AppBlock(ApplicationContext appContext, ViewContext ctx) {
+  AppBlock(ApplicationBaseProtocol appBaseProt, ViewContext ctx, Configuration conf) {
     super(ctx);
-    this.appContext = appContext;
+    this.appBaseProt = appBaseProt;
+    this.conf = conf;
   }
 
   @Override
   protected void render(Block html) {
+    String webUiType = $(WEB_UI_TYPE);
     String aid = $(APPLICATION_ID);
     if (aid.isEmpty()) {
       puts("Bad request: requires Application ID");
@@ -71,18 +98,21 @@ public class AppBlock extends HtmlBlock {
       return;
     }
 
-    final ApplicationId appIDFinal = appID;
     UserGroupInformation callerUGI = getCallerUGI();
-    ApplicationReport appReport;
+    ApplicationReport appReport = null;
     try {
+      final GetApplicationReportRequest request =
+          GetApplicationReportRequest.newInstance(appID);
       if (callerUGI == null) {
-        appReport = appContext.getApplication(appID);
+        appReport =
+            appBaseProt.getApplicationReport(request).getApplicationReport();
       } else {
         appReport = callerUGI.doAs(
             new PrivilegedExceptionAction<ApplicationReport> () {
           @Override
           public ApplicationReport run() throws Exception {
-            return appContext.getApplication(appIDFinal);
+            return appBaseProt.getApplicationReport(request)
+                .getApplicationReport();
           }
         });
       }
@@ -92,41 +122,90 @@ public class AppBlock extends HtmlBlock {
       html.p()._(message)._();
       return;
     }
+
     if (appReport == null) {
       puts("Application not found: " + aid);
       return;
     }
+
     AppInfo app = new AppInfo(appReport);
 
     setTitle(join("Application ", aid));
 
+    if (webUiType != null
+        && webUiType.equals(YarnWebParams.RM_WEB_UI)
+        && conf.getBoolean(YarnConfiguration.RM_WEBAPP_UI_ACTIONS_ENABLED,
+          YarnConfiguration.DEFAULT_RM_WEBAPP_UI_ACTIONS_ENABLED)) {
+      // Application Kill
+      html.div()
+        .button()
+          .$onclick("confirmAction()").b("Kill Application")._()
+          ._();
+
+      StringBuilder script = new StringBuilder();
+      script.append("function confirmAction() {")
+          .append(" b = confirm(\"Are you sure?\");")
+          .append(" if (b == true) {")
+          .append(" $.ajax({")
+          .append(" type: 'PUT',")
+          .append(" url: '/ws/v1/cluster/apps/").append(aid).append("/state',")
+          .append(" contentType: 'application/json',")
+          .append(" data: '{\"state\":\"KILLED\"}',")
+          .append(" dataType: 'json'")
+          .append(" }).done(function(data){")
+          .append(" setTimeout(function(){")
+          .append(" location.href = '/cluster/app/").append(aid).append("';")
+          .append(" }, 1000);")
+          .append(" }).fail(function(data){")
+          .append(" console.log(data);")
+          .append(" });")
+          .append(" }")
+          .append("}");
+
+      html.script().$type("text/javascript")._(script.toString())._();
+    }
+
     info("Application Overview")
       ._("User:", app.getUser())
       ._("Name:", app.getName())
       ._("Application Type:", app.getType())
-      ._("State:", app.getAppState())
-      ._("FinalStatus:", app.getFinalAppStatus())
+      ._("Application Tags:",
+        app.getApplicationTags() == null ? "" : app.getApplicationTags())
+      ._("YarnApplicationState:",
+        app.getAppState() == null ? UNAVAILABLE : clarifyAppState(app
+          .getAppState()))
+      ._("FinalStatus Reported by AM:",
+        clairfyAppFinalStatus(app.getFinalAppStatus()))
       ._("Started:", Times.format(app.getStartedTime()))
       ._(
         "Elapsed:",
         StringUtils.formatTime(Times.elapsed(app.getStartedTime(),
           app.getFinishedTime())))
       ._("Tracking URL:",
-        app.getTrackingUrl() == null ? "#" : root_url(app.getTrackingUrl()),
-        "History")._("Diagnostics:", app.getDiagnosticsInfo());
-
-    html._(InfoBlock.class);
+        app.getTrackingUrl() == null || app.getTrackingUrl() == UNAVAILABLE
+            ? null : root_url(app.getTrackingUrl()),
+        app.getTrackingUrl() == null || app.getTrackingUrl() == UNAVAILABLE
+            ? "Unassigned" : app.getAppState() == YarnApplicationState.FINISHED
+                || app.getAppState() == YarnApplicationState.FAILED
+                || app.getAppState() == YarnApplicationState.KILLED ? "History"
+                : "ApplicationMaster")
+      ._("Diagnostics:",
+          app.getDiagnosticsInfo() == null ? "" : app.getDiagnosticsInfo());
 
     Collection<ApplicationAttemptReport> attempts;
     try {
+      final GetApplicationAttemptsRequest request =
+          GetApplicationAttemptsRequest.newInstance(appID);
       if (callerUGI == null) {
-        attempts = appContext.getApplicationAttempts(appID).values();
+        attempts = appBaseProt.getApplicationAttempts(request)
+            .getApplicationAttemptList();
       } else {
         attempts = callerUGI.doAs(
             new PrivilegedExceptionAction<Collection<ApplicationAttemptReport>> () {
           @Override
           public Collection<ApplicationAttemptReport> run() throws Exception {
-            return appContext.getApplicationAttempts(appIDFinal).values();
+            return appBaseProt.getApplicationAttempts(request)
+                .getApplicationAttemptList();
           }
         });
       }
@@ -138,6 +217,34 @@ public class AppBlock extends HtmlBlock {
       return;
     }
 
+    //TODO:YARN-3284
+    //The preemption metrics will be exposed from ApplicationReport
+    // and ApplicationAttemptReport
+    ApplicationResourceUsageReport usageReport =
+        appReport.getApplicationResourceUsageReport();
+    DIV<Hamlet> pdiv = html.
+        _(InfoBlock.class).
+        div(_INFO_WRAP);
+    info("Application Overview").clear();
+    info("Application Metrics")
+        ._("Total Resource Preempted:",
+          Resources.none()) // TODO: YARN-3284
+        ._("Total Number of Non-AM Containers Preempted:",
+          String.valueOf(0)) // TODO: YARN-3284
+        ._("Total Number of AM Containers Preempted:",
+          String.valueOf(0)) // TODO: YARN-3284
+        ._("Resource Preempted from Current Attempt:",
+          Resources.none()) // TODO: YARN-3284
+        ._("Number of Non-AM Containers Preempted from Current Attempt:",
+          0) // TODO: YARN-3284
+        ._("Aggregate Resource Allocation:",
+          String.format("%d MB-seconds, %d vcore-seconds", usageReport == null
+            ? 0 : usageReport.getMemorySeconds(), usageReport == null ? 0
+            : usageReport.getVcoreSeconds()));
+    pdiv._();
+
+    html._(InfoBlock.class);
+
     // Application Attempt Table
     TBODY<TABLE<Hamlet>> tbody =
         html.table("#attempts").thead().tr().th(".id", "Attempt ID")
@@ -147,18 +254,28 @@ public class AppBlock extends HtmlBlock {
     StringBuilder attemptsTableData = new StringBuilder("[\n");
     for (final ApplicationAttemptReport appAttemptReport : attempts) {
       AppAttemptInfo appAttempt = new AppAttemptInfo(appAttemptReport);
-      ContainerReport containerReport;
+      ContainerReport containerReport = null;
       try {
+        // AM container is always the first container of the attempt
+        final GetContainerReportRequest request =
+            GetContainerReportRequest.newInstance(ContainerId.newContainerId(
+              appAttemptReport.getApplicationAttemptId(), 1));
         if (callerUGI == null) {
-          containerReport = appContext.getAMContainer(appAttemptReport
-              .getApplicationAttemptId());
+          containerReport =
+              appBaseProt.getContainerReport(request).getContainerReport();
         } else {
           containerReport = callerUGI.doAs(
               new PrivilegedExceptionAction<ContainerReport> () {
             @Override
             public ContainerReport run() throws Exception {
-              return appContext.getAMContainer(appAttemptReport
-                  .getApplicationAttemptId());
+              ContainerReport report = null;
+              try {
+                report = appBaseProt.getContainerReport(request)
+                    .getContainerReport();
+              } catch (ContainerNotFoundException ex) {
+                LOG.warn(ex.getMessage());
+              }
+              return report;
             }
           });
         }
@@ -170,7 +287,7 @@ public class AppBlock extends HtmlBlock {
         html.p()._(message)._();
         return;
       }
-      long startTime = Long.MAX_VALUE;
+      long startTime = 0L;
       String logsLink = null;
       if (containerReport != null) {
         ContainerInfo container = new ContainerInfo(containerReport);
@@ -192,14 +309,12 @@ public class AppBlock extends HtmlBlock {
         .append("</a>\",\"")
         .append(startTime)
         .append("\",\"<a href='")
-        .append(
-          nodeLink == null ? "#" : url("//", nodeLink))
+        .append("#") // TODO: replace with node http address (YARN-1884)
         .append("'>")
-        .append(
-          nodeLink == null ? "N/A" : StringEscapeUtils
+        .append(nodeLink == null ? "N/A" : StringEscapeUtils
             .escapeJavaScript(StringEscapeUtils.escapeHtml(nodeLink)))
-        .append("</a>\",\"<a href='")
-        .append(logsLink == null ? "#" : logsLink).append("'>")
+        .append("</a>\",\"<a ")
+        .append(logsLink == null ? "#" : "href='" + logsLink).append("'>")
         .append(logsLink == null ? "N/A" : "Logs").append("</a>\"],\n");
     }
     if (attemptsTableData.charAt(attemptsTableData.length() - 2) == ',') {
@@ -211,5 +326,108 @@ public class AppBlock extends HtmlBlock {
       ._("var attemptsTableData=" + attemptsTableData)._();
 
     tbody._()._();
+
+    createContainerLocalityTable(html); //TODO:YARN-3284
+    createResourceRequestsTable(html, null); //TODO:YARN-3284
+  }
+
+  //TODO: YARN-3284
+  //The containerLocality metrics will be exposed from AttemptReport
+  private void createContainerLocalityTable(Block html) {
+    int totalAllocatedContainers = 0; //TODO: YARN-3284
+    int[][] localityStatistics = new int[0][0];//TODO:YARN-3284
+    DIV<Hamlet> div = html.div(_INFO_WRAP);
+    TABLE<DIV<Hamlet>> table =
+        div.h3(
+          "Total Allocated Containers: "
+              + totalAllocatedContainers).h3("Each table cell"
+            + " represents the number of NodeLocal/RackLocal/OffSwitch containers"
+            + " satisfied by NodeLocal/RackLocal/OffSwitch resource requests.").table(
+          "#containerLocality");
+    table.
+      tr().
+        th(_TH, "").
+        th(_TH, "Node Local Request").
+        th(_TH, "Rack Local Request").
+        th(_TH, "Off Switch Request").
+      _();
+
+    String[] containersType =
+        { "Num Node Local Containers (satisfied by)", "Num Rack Local Containers (satisfied by)",
+            "Num Off Switch Containers (satisfied by)" };
+    boolean odd = false;
+    for (int i = 0; i < localityStatistics.length; i++) {
+      table.tr((odd = !odd) ? _ODD : _EVEN).td(containersType[i])
+        .td(String.valueOf(localityStatistics[i][0]))
+        .td(i == 0 ? "" : String.valueOf(localityStatistics[i][1]))
+        .td(i <= 1 ? "" : String.valueOf(localityStatistics[i][2]))._();
+    }
+    table._();
+    div._();
+  }
+
+  //TODO:YARN-3284
+  //The resource requests metrics will be exposed from attemptReport
+  private void createResourceRequestsTable(Block html, List<ResourceRequest> resouceRequests) {
+    TBODY<TABLE<Hamlet>> tbody =
+        html.table("#ResourceRequests").thead().tr()
+          .th(".priority", "Priority")
+          .th(".resourceName", "ResourceName")
+          .th(".totalResource", "Capability")
+          .th(".numContainers", "NumContainers")
+          .th(".relaxLocality", "RelaxLocality")
+          .th(".nodeLabelExpression", "NodeLabelExpression")._()._().tbody();
+
+    Resource totalResource = Resource.newInstance(0, 0);
+    if (resouceRequests != null) {
+      for (ResourceRequest request : resouceRequests) {
+        if (request.getNumContainers() == 0) {
+          continue;
+        }
+
+        tbody.tr()
+          .td(String.valueOf(request.getPriority()))
+          .td(request.getResourceName())
+          .td(String.valueOf(request.getCapability()))
+          .td(String.valueOf(request.getNumContainers()))
+          .td(String.valueOf(request.getRelaxLocality()))
+          .td(request.getNodeLabelExpression() == null ? "N/A" : request
+              .getNodeLabelExpression())._();
+        if (request.getResourceName().equals(ResourceRequest.ANY)) {
+          Resources.addTo(totalResource,
+            Resources.multiply(request.getCapability(),
+              request.getNumContainers()));
+        }
+      }
+    }
+    html.div().$class("totalResourceRequests")
+      .h3("Total Outstanding Resource Requests: " + totalResource)._();
+    tbody._()._();
+  }
+
+  private String clarifyAppState(YarnApplicationState state) {
+    String ret = state.toString();
+    switch (state) {
+    case NEW:
+      return ret + ": waiting for application to be initialized";
+    case NEW_SAVING:
+      return ret + ": waiting for application to be persisted in state-store.";
+    case SUBMITTED:
+      return ret + ": waiting for application to be accepted by scheduler.";
+    case ACCEPTED:
+      return ret + ": waiting for AM container to be allocated, launched and"
+          + " register with RM.";
+    case RUNNING:
+      return ret + ": AM has registered with RM and started running.";
+    default:
+      return ret;
+    }
+  }
+
+  private String clairfyAppFinalStatus(FinalApplicationStatus status) {
+    if (status == FinalApplicationStatus.UNDEFINED) {
+      return "Application has not completed yet.";
+    }
+    return status.toString();
   }
 }

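Note how the attempt table above no longer asks a context layer for the AM
container: it leans on the convention that the AM runs in the first container
of an attempt, and it tolerates a missing report. A hedged sketch of that
lookup in isolation (the protocol instance is assumed; not part of the patch):

    import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
    import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
    import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
    import org.apache.hadoop.yarn.api.records.ContainerId;
    import org.apache.hadoop.yarn.api.records.ContainerReport;
    import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException;

    public class AmContainerLookup {
      // Returns the AM container report, or null when it is not (yet) known.
      public static ContainerReport findAmContainer(
          ApplicationBaseProtocol proto, ApplicationAttemptId attemptId)
          throws Exception {
        // By convention the AM runs in the first container of the attempt.
        ContainerId amContainerId = ContainerId.newContainerId(attemptId, 1);
        GetContainerReportRequest request =
            GetContainerReportRequest.newInstance(amContainerId);
        try {
          return proto.getContainerReport(request).getContainerReport();
        } catch (ContainerNotFoundException e) {
          // The store may not have recorded the container yet.
          return null;
        }
      }
    }
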
http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java
index f341cf6..161486d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java
@@ -25,13 +25,16 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI.C_PROGRESSBAR_VALUE;
 
 import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
-import java.util.HashSet;
+import java.util.EnumSet;
 
 import org.apache.commons.lang.StringEscapeUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
-import org.apache.hadoop.yarn.server.api.ApplicationContext;
 import org.apache.hadoop.yarn.server.webapp.dao.AppInfo;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
@@ -42,12 +45,13 @@ import com.google.inject.Inject;
 
 public class AppsBlock extends HtmlBlock {
 
-  protected ApplicationContext appContext;
+  private static final Log LOG = LogFactory.getLog(AppsBlock.class);
+  protected ApplicationBaseProtocol appBaseProt;
 
   @Inject
-  AppsBlock(ApplicationContext appContext, ViewContext ctx) {
+  AppsBlock(ApplicationBaseProtocol appBaseProt, ViewContext ctx) {
     super(ctx);
-    this.appContext = appContext;
+    this.appBaseProt = appBaseProt;
   }
 
   @Override
@@ -61,27 +65,29 @@ public class AppsBlock extends HtmlBlock {
           .th(".finishtime", "FinishTime").th(".state", "State")
           .th(".finalstatus", "FinalStatus").th(".progress", "Progress")
           .th(".ui", "Tracking UI")._()._().tbody();
-    Collection<YarnApplicationState> reqAppStates = null;
+    EnumSet<YarnApplicationState> reqAppStates =
+        EnumSet.noneOf(YarnApplicationState.class);
     String reqStateString = $(APP_STATE);
     if (reqStateString != null && !reqStateString.isEmpty()) {
       String[] appStateStrings = reqStateString.split(",");
-      reqAppStates = new HashSet<YarnApplicationState>(appStateStrings.length);
       for (String stateString : appStateStrings) {
-        reqAppStates.add(YarnApplicationState.valueOf(stateString));
+        reqAppStates.add(YarnApplicationState.valueOf(stateString.trim()));
       }
     }
 
     UserGroupInformation callerUGI = getCallerUGI();
-    Collection<ApplicationReport> appReports;
+    Collection<ApplicationReport> appReports = null;
     try {
+      final GetApplicationsRequest request =
+          GetApplicationsRequest.newInstance(reqAppStates);
       if (callerUGI == null) {
-        appReports = appContext.getAllApplications().values();
+        appReports = appBaseProt.getApplications(request).getApplicationList();
       } else {
         appReports = callerUGI.doAs(
             new PrivilegedExceptionAction<Collection<ApplicationReport>> () {
           @Override
           public Collection<ApplicationReport> run() throws Exception {
-            return appContext.getAllApplications().values();
+            return appBaseProt.getApplications(request).getApplicationList();
           }
         });
       }
@@ -93,12 +99,15 @@ public class AppsBlock extends HtmlBlock {
     }
     StringBuilder appsTableData = new StringBuilder("[\n");
     for (ApplicationReport appReport : appReports) {
-      if (reqAppStates != null
+      // TODO: remove the following condition. It is still here because
+      // the history-side implementation of ApplicationBaseProtocol
+      // does not yet have filtering capability (YARN-1819).
+      if (!reqAppStates.isEmpty()
           && !reqAppStates.contains(appReport.getYarnApplicationState())) {
         continue;
       }
       AppInfo app = new AppInfo(appReport);
-      String percent = String.format("%.1f", app.getProgress() * 100.0F);
+      String percent = String.format("%.1f", app.getProgress());
       // AppID numerical value parsed by parseHadoopID in yarn.dt.plugins.js
       appsTableData
         .append("[\"<a href='")
@@ -123,7 +132,7 @@ public class AppsBlock extends HtmlBlock {
             .getQueue()))).append("\",\"").append(app.getStartedTime())
         .append("\",\"").append(app.getFinishedTime())
         .append("\",\"")
-        .append(app.getAppState())
+        .append(app.getAppState() == null ? UNAVAILABLE : app.getAppState())
         .append("\",\"")
         .append(app.getFinalAppStatus())
         .append("\",\"")
@@ -132,13 +141,21 @@ public class AppsBlock extends HtmlBlock {
         .append(C_PROGRESSBAR).append("' title='").append(join(percent, '%'))
         .append("'> ").append("<div class='").append(C_PROGRESSBAR_VALUE)
         .append("' style='").append(join("width:", percent, '%'))
-        .append("'> </div> </div>").append("\",\"<a href='");
+        .append("'> </div> </div>").append("\",\"<a ");
 
       String trackingURL =
-          app.getTrackingUrl() == null ? "#" : app.getTrackingUrl();
+          app.getTrackingUrl() == null || app.getTrackingUrl() == UNAVAILABLE
+              ? null : app.getTrackingUrl();
 
-      appsTableData.append(trackingURL).append("'>").append("History")
-        .append("</a>\"],\n");
+      String trackingUI =
+          app.getTrackingUrl() == null || app.getTrackingUrl() == UNAVAILABLE
+              ? "Unassigned"
+              : app.getAppState() == YarnApplicationState.FINISHED
+                  || app.getAppState() == YarnApplicationState.FAILED
+                  || app.getAppState() == YarnApplicationState.KILLED
+                  ? "History" : "ApplicationMaster";
+      appsTableData.append(trackingURL == null ? "#" : "href='" + trackingURL)
+        .append("'>").append(trackingUI).append("</a>\"],\n");
 
     }
     if (appsTableData.charAt(appsTableData.length() - 2) == ',') {

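The state filter is now pushed into the protocol itself through
GetApplicationsRequest; the remaining client-side check only covers the
history implementation until it gains filtering (YARN-1819). In isolation the
server-side filter looks roughly like this (protocol instance assumed; an
illustration only):

    import java.util.EnumSet;
    import java.util.List;

    import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
    import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
    import org.apache.hadoop.yarn.api.records.ApplicationReport;
    import org.apache.hadoop.yarn.api.records.YarnApplicationState;

    public class ActiveAppsLister {
      // Ask the protocol for RUNNING and ACCEPTED applications only.
      public static List<ApplicationReport> listActive(
          ApplicationBaseProtocol proto) throws Exception {
        EnumSet<YarnApplicationState> states = EnumSet.of(
            YarnApplicationState.RUNNING, YarnApplicationState.ACCEPTED);
        GetApplicationsRequest request =
            GetApplicationsRequest.newInstance(states);
        return proto.getApplications(request).getApplicationList();
      }
    }
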
http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java
index 2bb48a8..ed50c7a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java
@@ -26,9 +26,10 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerReport;
-import org.apache.hadoop.yarn.server.api.ApplicationContext;
 import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.Times;
@@ -40,12 +41,12 @@ import com.google.inject.Inject;
 public class ContainerBlock extends HtmlBlock {
 
   private static final Log LOG = LogFactory.getLog(ContainerBlock.class);
-  private final ApplicationContext appContext;
+  protected ApplicationBaseProtocol appBaseProt;
 
   @Inject
-  public ContainerBlock(ApplicationContext appContext, ViewContext ctx) {
+  public ContainerBlock(ApplicationBaseProtocol appBaseProt, ViewContext ctx) {
     super(ctx);
-    this.appContext = appContext;
+    this.appBaseProt = appBaseProt;
   }
 
   @Override
@@ -64,18 +65,21 @@ public class ContainerBlock extends HtmlBlock {
       return;
     }
 
-    final ContainerId containerIdFinal = containerId;
     UserGroupInformation callerUGI = getCallerUGI();
-    ContainerReport containerReport;
+    ContainerReport containerReport = null;
     try {
+      final GetContainerReportRequest request =
+          GetContainerReportRequest.newInstance(containerId);
       if (callerUGI == null) {
-        containerReport = appContext.getContainer(containerId);
+        containerReport = appBaseProt.getContainerReport(request)
+            .getContainerReport();
       } else {
         containerReport = callerUGI.doAs(
             new PrivilegedExceptionAction<ContainerReport> () {
           @Override
           public ContainerReport run() throws Exception {
-            return appContext.getContainer(containerIdFinal);
+            return appBaseProt.getContainerReport(request)
+                .getContainerReport();
           }
         });
       }
@@ -85,6 +89,7 @@ public class ContainerBlock extends HtmlBlock {
       html.p()._(message)._();
       return;
     }
+
     if (containerReport == null) {
       puts("Container not found: " + containerid);
       return;
@@ -94,7 +99,10 @@ public class ContainerBlock extends HtmlBlock {
     setTitle(join("Container ", containerid));
 
     info("Container Overview")
-      ._("State:", container.getContainerState())
+      ._(
+        "Container State:",
+        container.getContainerState() == null ? UNAVAILABLE : container
+          .getContainerState())
       ._("Exit Status:", container.getContainerExitStatus())
       ._("Node:", container.getAssignedNodeId())
       ._("Priority:", container.getPriority())
@@ -109,7 +117,8 @@ public class ContainerBlock extends HtmlBlock {
             + container.getAllocatedVCores() + " VCores")
       ._("Logs:", container.getLogUrl() == null ? "#" : container.getLogUrl(),
           container.getLogUrl() == null ? "N/A" : "Logs")
-      ._("Diagnostics:", container.getDiagnosticsInfo());
+      ._("Diagnostics:", container.getDiagnosticsInfo() == null ?
+          "" : container.getDiagnosticsInfo());
 
     html._(InfoBlock.class);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
new file mode 100644
index 0000000..384a976
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.webapp;
+
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
+
+
+public class WebPageUtils {
+
+  public static String appsTableInit() {
+    return appsTableInit(false);
+  }
+
+  public static String appsTableInit(boolean isFairSchedulerPage) {
+    // id, user, name, queue, starttime, finishtime, state, status, progress, ui
+    // FairSchedulerPage's table is a bit different
+    return tableInit()
+      .append(", 'aaData': appsTableData")
+      .append(", bDeferRender: true")
+      .append(", bProcessing: true")
+      .append("\n, aoColumnDefs: ")
+      .append(getAppsTableColumnDefs(isFairSchedulerPage))
+      // Sort by id upon page load
+      .append(", aaSorting: [[0, 'desc']]}").toString();
+  }
+
+  private static String getAppsTableColumnDefs(boolean isFairSchedulerPage) {
+    StringBuilder sb = new StringBuilder();
+    return sb
+      .append("[\n")
+      .append("{'sType':'numeric', 'aTargets': [0]")
+      .append(", 'mRender': parseHadoopID }")
+      .append("\n, {'sType':'numeric', 'aTargets': " +
+          (isFairSchedulerPage ? "[6, 7]": "[5, 6]"))
+      .append(", 'mRender': renderHadoopDate }")
+      .append("\n, {'sType':'numeric', bSearchable:false, 'aTargets': [9]")
+      .append(", 'mRender': parseHadoopProgress }]").toString();
+  }
+
+  public static String attemptsTableInit() {
+    return tableInit().append(", 'aaData': attemptsTableData")
+      .append(", bDeferRender: true").append(", bProcessing: true")
+      .append("\n, aoColumnDefs: ").append(getAttemptsTableColumnDefs())
+      // Sort by id upon page load
+      .append(", aaSorting: [[0, 'desc']]}").toString();
+  }
+
+  private static String getAttemptsTableColumnDefs() {
+    StringBuilder sb = new StringBuilder();
+    return sb.append("[\n").append("{'sType':'numeric', 'aTargets': [0]")
+      .append(", 'mRender': parseHadoopID }")
+      .append("\n, {'sType':'numeric', 'aTargets': [1]")
+      .append(", 'mRender': renderHadoopDate }]").toString();
+  }
+
+  public static String containersTableInit() {
+    return tableInit().append(", 'aaData': containersTableData")
+      .append(", bDeferRender: true").append(", bProcessing: true")
+      .append("\n, aoColumnDefs: ").append(getContainersTableColumnDefs())
+      // Sort by id upon page load
+      .append(", aaSorting: [[0, 'desc']]}").toString();
+  }
+
+  private static String getContainersTableColumnDefs() {
+    StringBuilder sb = new StringBuilder();
+    return sb.append("[\n").append("{'sType':'numeric', 'aTargets': [0]")
+      .append(", 'mRender': parseHadoopID }]").toString();
+  }
+
+}
\ No newline at end of file

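WebPageUtils centralizes the jQuery DataTables init strings that the RM and
history web UIs previously built separately. A small usage sketch (assuming
hadoop-yarn-server-common on the classpath); note the fair-scheduler variant
targets columns [6, 7] instead of [5, 6] for the date renderer, since that
page's column layout differs:

    import org.apache.hadoop.yarn.server.webapp.WebPageUtils;

    public class TableInitDemo {
      public static void main(String[] args) {
        // Print the DataTables config each page hands to the JS layer.
        System.out.println(WebPageUtils.appsTableInit(false));
        System.out.println(WebPageUtils.appsTableInit(true));
      }
    }
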
http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
index 6d94737..909bf1d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
@@ -40,7 +40,13 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerReport;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
-import org.apache.hadoop.yarn.server.api.ApplicationContext;
+import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest;
 import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptsInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.AppInfo;
@@ -54,10 +60,10 @@ import org.apache.hadoop.yarn.webapp.NotFoundException;
 
 public class WebServices {
 
-  protected ApplicationContext appContext;
+  protected ApplicationBaseProtocol appBaseProt;
 
-  public WebServices(ApplicationContext appContext) {
-    this.appContext = appContext;
+  public WebServices(ApplicationBaseProtocol appBaseProt) {
+    this.appBaseProt = appBaseProt;
   }
 
   public AppsInfo getApps(HttpServletRequest req, HttpServletResponse res,
@@ -144,13 +150,17 @@ public class WebServices {
     Collection<ApplicationReport> appReports = null;
     try {
       if (callerUGI == null) {
-        appReports = appContext.getAllApplications().values();
+        // TODO: the request should take the same params that RMWebServices
+        // accepts; see YARN-1819.
+        GetApplicationsRequest request = GetApplicationsRequest.newInstance();
+        appReports = appBaseProt.getApplications(request).getApplicationList();
       } else {
         appReports = callerUGI.doAs(
             new PrivilegedExceptionAction<Collection<ApplicationReport>> () {
           @Override
           public Collection<ApplicationReport> run() throws Exception {
-            return appContext.getAllApplications().values();
+            return appBaseProt.getApplications(
+                GetApplicationsRequest.newInstance()).getApplicationList();
           }
         });
       }
@@ -214,13 +224,17 @@ public class WebServices {
     ApplicationReport app = null;
     try {
       if (callerUGI == null) {
-        app = appContext.getApplication(id);
+        GetApplicationReportRequest request =
+            GetApplicationReportRequest.newInstance(id);
+        app = appBaseProt.getApplicationReport(request).getApplicationReport();
       } else {
         app = callerUGI.doAs(
             new PrivilegedExceptionAction<ApplicationReport> () {
           @Override
           public ApplicationReport run() throws Exception {
-            return appContext.getApplication(id);
+            GetApplicationReportRequest request =
+                GetApplicationReportRequest.newInstance(id);
+            return appBaseProt.getApplicationReport(request).getApplicationReport();
           }
         });
       }
@@ -240,13 +254,20 @@ public class WebServices {
     Collection<ApplicationAttemptReport> appAttemptReports = null;
     try {
       if (callerUGI == null) {
-        appAttemptReports = appContext.getApplicationAttempts(id).values();
+        GetApplicationAttemptsRequest request =
+            GetApplicationAttemptsRequest.newInstance(id);
+        appAttemptReports =
+            appBaseProt.getApplicationAttempts(request)
+              .getApplicationAttemptList();
       } else {
         appAttemptReports = callerUGI.doAs(
             new PrivilegedExceptionAction<Collection<ApplicationAttemptReport>> () {
           @Override
           public Collection<ApplicationAttemptReport> run() throws Exception {
-            return appContext.getApplicationAttempts(id).values();
+            GetApplicationAttemptsRequest request =
+                GetApplicationAttemptsRequest.newInstance(id);
+            return appBaseProt.getApplicationAttempts(request)
+                  .getApplicationAttemptList();
           }
         });
       }
@@ -271,13 +292,20 @@ public class WebServices {
     ApplicationAttemptReport appAttempt = null;
     try {
       if (callerUGI == null) {
-        appAttempt = appContext.getApplicationAttempt(aaid);
+        GetApplicationAttemptReportRequest request =
+            GetApplicationAttemptReportRequest.newInstance(aaid);
+        appAttempt =
+            appBaseProt.getApplicationAttemptReport(request)
+              .getApplicationAttemptReport();
       } else {
         appAttempt = callerUGI.doAs(
             new PrivilegedExceptionAction<ApplicationAttemptReport> () {
           @Override
           public ApplicationAttemptReport run() throws Exception {
-            return appContext.getApplicationAttempt(aaid);
+            GetApplicationAttemptReportRequest request =
+                GetApplicationAttemptReportRequest.newInstance(aaid);
+            return appBaseProt.getApplicationAttemptReport(request)
+                  .getApplicationAttemptReport();
           }
         });
       }
@@ -300,13 +328,16 @@ public class WebServices {
     Collection<ContainerReport> containerReports = null;
     try {
       if (callerUGI == null) {
-        containerReports = appContext.getContainers(aaid).values();
+        GetContainersRequest request = GetContainersRequest.newInstance(aaid);
+        containerReports =
+            appBaseProt.getContainers(request).getContainerList();
       } else {
         containerReports = callerUGI.doAs(
             new PrivilegedExceptionAction<Collection<ContainerReport>> () {
           @Override
           public Collection<ContainerReport> run() throws Exception {
-            return appContext.getContainers(aaid).values();
+            GetContainersRequest request = GetContainersRequest.newInstance(aaid);
+            return appBaseProt.getContainers(request).getContainerList();
           }
         });
       }
@@ -332,13 +363,18 @@ public class WebServices {
     ContainerReport container = null;
     try {
       if (callerUGI == null) {
-        container = appContext.getContainer(cid);
+        GetContainerReportRequest request =
+            GetContainerReportRequest.newInstance(cid);
+        container =
+            appBaseProt.getContainerReport(request).getContainerReport();
       } else {
         container = callerUGI.doAs(
             new PrivilegedExceptionAction<ContainerReport> () {
           @Override
           public ContainerReport run() throws Exception {
-            return appContext.getContainer(cid);
+            GetContainerReportRequest request =
+                GetContainerReportRequest.newInstance(cid);
+            return appBaseProt.getContainerReport(request).getContainerReport();
           }
         });
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
index d78f928..e8b1acc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.yarn.server.webapp.dao;
 
+import static org.apache.hadoop.yarn.util.StringHelper.CSV_JOINER;
+
 import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlRootElement;
@@ -49,6 +51,7 @@ public class AppInfo {
   protected long startedTime;
   protected long finishedTime;
   protected long elapsedTime;
+  protected String applicationTags;
 
   public AppInfo() {
     // JAXB needs this
@@ -74,7 +77,10 @@ public class AppInfo {
     finishedTime = app.getFinishTime();
     elapsedTime = Times.elapsed(startedTime, finishedTime);
     finalAppStatus = app.getFinalApplicationStatus();
-    progress = app.getProgress();
+    progress = app.getProgress() * 100; // in percent
+    if (app.getApplicationTags() != null && !app.getApplicationTags().isEmpty()) {
+      this.applicationTags = CSV_JOINER.join(app.getApplicationTags());
+    }
   }
 
   public String getAppId() {
@@ -149,4 +155,7 @@ public class AppInfo {
     return elapsedTime;
   }
 
+  public String getApplicationTags() {
+    return applicationTags;
+  }
 }

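Two details in AppInfo above pair with the earlier AppsBlock change: progress
is now stored as a percentage (which is why AppsBlock stopped multiplying by
100 before formatting), and the tags are flattened with CSV_JOINER into a
single string. A trivial illustration of the progress convention (values are
made up):

    public class ProgressFormatDemo {
      public static void main(String[] args) {
        float reportProgress = 0.5f;               // fraction, as in ApplicationReport
        float infoProgress = reportProgress * 100; // percent, as stored by AppInfo
        // AppsBlock now formats the stored value directly:
        System.out.println(String.format("%.1f", infoProgress)); // "50.0" in an English locale
      }
    }
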
http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppAttemptPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppAttemptPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppAttemptPage.java
new file mode 100644
index 0000000..92eae48
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppAttemptPage.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import static org.apache.hadoop.yarn.util.StringHelper.join;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
+
+import org.apache.hadoop.yarn.server.webapp.AppAttemptBlock;
+import org.apache.hadoop.yarn.server.webapp.WebPageUtils;
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.YarnWebParams;
+
+
+public class AppAttemptPage extends RmView {
+
+  @Override
+  protected void preHead(Page.HTML<_> html) {
+    commonPreHead(html);
+
+    String appAttemptId = $(YarnWebParams.APPLICATION_ATTEMPT_ID);
+    set(
+      TITLE,
+      appAttemptId.isEmpty() ? "Bad request: missing application attempt ID"
+          : join("Application Attempt ",
+            $(YarnWebParams.APPLICATION_ATTEMPT_ID)));
+
+    set(DATATABLES_ID, "containers");
+    set(initID(DATATABLES, "containers"), WebPageUtils.containersTableInit());
+    setTableStyles(html, "containers", ".queue {width:6em}", ".ui {width:8em}");
+  }
+
+  @Override
+  protected Class<? extends SubView> content() {
+    return AppAttemptBlock.class;
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
deleted file mode 100644
index 00508b8..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
+++ /dev/null
@@ -1,344 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-package org.apache.hadoop.yarn.server.resourcemanager.webapp;
-
-import static org.apache.hadoop.yarn.util.StringHelper.join;
-import static org.apache.hadoop.yarn.webapp.YarnWebParams.APPLICATION_ID;
-import static org.apache.hadoop.yarn.webapp.view.JQueryUI._EVEN;
-import static org.apache.hadoop.yarn.webapp.view.JQueryUI._INFO_WRAP;
-import static org.apache.hadoop.yarn.webapp.view.JQueryUI._ODD;
-import static org.apache.hadoop.yarn.webapp.view.JQueryUI._TH;
-
-import java.util.Collection;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
-import org.apache.hadoop.yarn.api.records.QueueACL;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.api.records.ResourceRequest;
-import org.apache.hadoop.yarn.api.records.YarnApplicationState;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
-import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppMetrics;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptInfo;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
-import org.apache.hadoop.yarn.util.Apps;
-import org.apache.hadoop.yarn.util.Times;
-import org.apache.hadoop.yarn.util.resource.Resources;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
-import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
-import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
-import org.apache.hadoop.yarn.webapp.view.InfoBlock;
-
-import com.google.inject.Inject;
-
-public class AppBlock extends HtmlBlock {
-
-  private final Configuration conf;
-  private final ResourceManager rm;
-  private final boolean rmWebAppUIActions;
-
-  @Inject
-  AppBlock(ResourceManager rm, ViewContext ctx, Configuration conf) {
-    super(ctx);
-    this.conf = conf;
-    this.rm = rm;
-    this.rmWebAppUIActions =
-        conf.getBoolean(YarnConfiguration.RM_WEBAPP_UI_ACTIONS_ENABLED,
-                YarnConfiguration.DEFAULT_RM_WEBAPP_UI_ACTIONS_ENABLED);
-  }
-
-  @Override
-  protected void render(Block html) {
-    String aid = $(APPLICATION_ID);
-    if (aid.isEmpty()) {
-      puts("Bad request: requires application ID");
-      return;
-    }
-
-    ApplicationId appID = null;
-    try {
-      appID = Apps.toAppID(aid);
-    } catch (Exception e) {
-      puts("Invalid Application ID: " + aid);
-      return;
-    }
-
-    RMContext context = this.rm.getRMContext();
-    RMApp rmApp = context.getRMApps().get(appID);
-    if (rmApp == null) {
-      puts("Application not found: "+ aid);
-      return;
-    }
-    AppInfo app =
-        new AppInfo(rm, rmApp, true, WebAppUtils.getHttpSchemePrefix(conf));
-
-    // Check for the authorization.
-    String remoteUser = request().getRemoteUser();
-    UserGroupInformation callerUGI = null;
-    if (remoteUser != null) {
-      callerUGI = UserGroupInformation.createRemoteUser(remoteUser);
-    }
-    if (callerUGI != null
-        && !(this.rm.getApplicationACLsManager().checkAccess(callerUGI,
-            ApplicationAccessType.VIEW_APP, app.getUser(), appID) || this.rm
-            .getQueueACLsManager().checkAccess(callerUGI,
-                QueueACL.ADMINISTER_QUEUE, app.getQueue()))) {
-      puts("You (User " + remoteUser
-          + ") are not authorized to view application " + appID);
-      return;
-    }
-
-    setTitle(join("Application ", aid));
-
-    if (rmWebAppUIActions) {
-      // Application Kill
-      html.div()
-        .button()
-          .$onclick("confirmAction()").b("Kill Application")._()
-          ._();
-
-      StringBuilder script = new StringBuilder();
-      script.append("function confirmAction() {")
-          .append(" b = confirm(\"Are you sure?\");")
-          .append(" if (b == true) {")
-          .append(" $.ajax({")
-          .append(" type: 'PUT',")
-          .append(" url: '/ws/v1/cluster/apps/").append(aid).append("/state',")
-          .append(" contentType: 'application/json',")
-          .append(" data: '{\"state\":\"KILLED\"}',")
-          .append(" dataType: 'json'")
-          .append(" }).done(function(data){")
-          .append(" setTimeout(function(){")
-          .append(" location.href = '/cluster/app/").append(aid).append("';")
-          .append(" }, 1000);")
-          .append(" }).fail(function(data){")
-          .append(" console.log(data);")
-          .append(" });")
-          .append(" }")
-          .append("}");
-
-      html.script().$type("text/javascript")._(script.toString())._();
-    }
-
-    RMAppMetrics appMerics = rmApp.getRMAppMetrics();
-    
-    // Get attempt metrics and fields, it is possible currentAttempt of RMApp is
-    // null. In that case, we will assume resource preempted and number of Non
-    // AM container preempted on that attempt is 0
-    RMAppAttemptMetrics attemptMetrics;
-    if (null == rmApp.getCurrentAppAttempt()) {
-      attemptMetrics = null;
-    } else {
-      attemptMetrics = rmApp.getCurrentAppAttempt().getRMAppAttemptMetrics();
-    }
-    Resource attemptResourcePreempted =
-        attemptMetrics == null ? Resources.none() : attemptMetrics
-            .getResourcePreempted();
-    int attemptNumNonAMContainerPreempted =
-        attemptMetrics == null ? 0 : attemptMetrics
-            .getNumNonAMContainersPreempted();
-    
-    info("Application Overview")
-        ._("User:", app.getUser())
-        ._("Name:", app.getName())
-        ._("Application Type:", app.getApplicationType())
-        ._("Application Tags:", app.getApplicationTags())
-        ._("YarnApplicationState:", clarifyAppState(app.getState()))
-        ._("FinalStatus Reported by AM:",
-          clairfyAppFinalStatus(app.getFinalStatus()))
-        ._("Started:", Times.format(app.getStartTime()))
-        ._("Elapsed:",
-            StringUtils.formatTime(Times.elapsed(app.getStartTime(),
-                app.getFinishTime())))
-        ._("Tracking URL:",
-            !app.isTrackingUrlReady() ? "#" : app.getTrackingUrlPretty(),
-            app.getTrackingUI())
-        ._("Diagnostics:", app.getNote());
-
-    DIV<Hamlet> pdiv = html.
-        _(InfoBlock.class).
-        div(_INFO_WRAP);
-    info("Application Overview").clear();
-    info("Application Metrics")
-        ._("Total Resource Preempted:",
-          appMerics.getResourcePreempted())
-        ._("Total Number of Non-AM Containers Preempted:",
-          String.valueOf(appMerics.getNumNonAMContainersPreempted()))
-        ._("Total Number of AM Containers Preempted:",
-          String.valueOf(appMerics.getNumAMContainersPreempted()))
-        ._("Resource Preempted from Current Attempt:",
-          attemptResourcePreempted)
-        ._("Number of Non-AM Containers Preempted from Current Attempt:",
-          attemptNumNonAMContainerPreempted)
-        ._("Aggregate Resource Allocation:",
-          String.format("%d MB-seconds, %d vcore-seconds", 
-              appMerics.getMemorySeconds(), appMerics.getVcoreSeconds()));
-    pdiv._();
-
-    Collection<RMAppAttempt> attempts = rmApp.getAppAttempts().values();
-    String amString =
-        attempts.size() == 1 ? "ApplicationMaster" : "ApplicationMasters";
-
-    DIV<Hamlet> div = html.
-        _(InfoBlock.class).
-        div(_INFO_WRAP);
-    // MRAppMasters Table
-    TABLE<DIV<Hamlet>> table = div.table("#app");
-    table.
-      tr().
-        th(amString).
-      _().
-      tr().
-        th(_TH, "Attempt Number").
-        th(_TH, "Start Time").
-        th(_TH, "Node").
-        th(_TH, "Logs").
-      _();
-
-    boolean odd = false;
-    for (RMAppAttempt attempt : attempts) {
-      AppAttemptInfo attemptInfo = new AppAttemptInfo(attempt, app.getUser());
-      table.tr((odd = !odd) ? _ODD : _EVEN).
-        td(String.valueOf(attemptInfo.getAttemptId())).
-        td(Times.format(attemptInfo.getStartTime())).
-        td().a(".nodelink", url("//",
-            attemptInfo.getNodeHttpAddress()),
-            attemptInfo.getNodeHttpAddress())._().
-        td().a(".logslink", url(attemptInfo.getLogsLink()), "logs")._().
-      _();
-    }
-
-    table._();
-    div._();
-
-    createContainerLocalityTable(html, attemptMetrics);
-    createResourceRequestsTable(html, app);
-  }
-
-  private void createContainerLocalityTable(Block html,
-      RMAppAttemptMetrics attemptMetrics) {
-    if (attemptMetrics == null) {
-      return;
-    }
-
-    DIV<Hamlet> div = html.div(_INFO_WRAP);
-    TABLE<DIV<Hamlet>> table =
-        div.h3(
-          "Total Allocated Containers: "
-              + attemptMetrics.getTotalAllocatedContainers()).h3("Each table cell"
-            + " represents the number of NodeLocal/RackLocal/OffSwitch containers"
-            + " satisfied by NodeLocal/RackLocal/OffSwitch resource requests.").table(
-          "#containerLocality");
-    table.
-      tr().
-        th(_TH, "").
-        th(_TH, "Node Local Request").
-        th(_TH, "Rack Local Request").
-        th(_TH, "Off Switch Request").
-      _();
-
-    String[] containersType =
-        { "Num Node Local Containers (satisfied by)", "Num Rack Local Containers (satisfied by)",
-            "Num Off Switch Containers (satisfied by)" };
-    boolean odd = false;
-    for (int i = 0; i < attemptMetrics.getLocalityStatistics().length; i++) {
-      table.tr((odd = !odd) ? _ODD : _EVEN).td(containersType[i])
-        .td(String.valueOf(attemptMetrics.getLocalityStatistics()[i][0]))
-        .td(i == 0 ? "" : String.valueOf(attemptMetrics.getLocalityStatistics()[i][1]))
-        .td(i <= 1 ? "" : String.valueOf(attemptMetrics.getLocalityStatistics()[i][2]))._();
-    }
-    table._();
-    div._();
-  }
-
-  private void createResourceRequestsTable(Block html, AppInfo app) {
-    TBODY<TABLE<Hamlet>> tbody =
-        html.table("#ResourceRequests").thead().tr()
-          .th(".priority", "Priority")
-          .th(".resourceName", "Resource Name")
-          .th(".totalResource", "Capability")
-          .th(".numContainers", "Num Containers")
-          .th(".relaxLocality", "Relax Locality")
-          .th(".nodeLabelExpression", "Node Label Expression")._()._().tbody();
-
-    Resource totalResource = Resource.newInstance(0, 0);
-    if (app.getResourceRequests() != null) {
-      for (ResourceRequest request : app.getResourceRequests()) {
-        if (request.getNumContainers() == 0) {
-          continue;
-        }
-
-        tbody.tr()
-          .td(String.valueOf(request.getPriority()))
-          .td(request.getResourceName())
-          .td(String.valueOf(request.getCapability()))
-          .td(String.valueOf(request.getNumContainers()))
-          .td(String.valueOf(request.getRelaxLocality()))
-          .td(request.getNodeLabelExpression() == null ? "N/A" : request
-              .getNodeLabelExpression())._();
-        if (request.getResourceName().equals(ResourceRequest.ANY)) {
-          Resources.addTo(totalResource,
-            Resources.multiply(request.getCapability(),
-              request.getNumContainers()));
-        }
-      }
-    }
-    html.div().$class("totalResourceRequests")
-      .h3("Total Outstanding Resource Requests: " + totalResource)._();
-    tbody._()._();
-  }
-
-  private String clarifyAppState(YarnApplicationState state) {
-    String ret = state.toString();
-    switch (state) {
-    case NEW:
-      return ret + ": waiting for application to be initialized";
-    case NEW_SAVING:
-      return ret + ": waiting for application to be persisted in state-store.";
-    case SUBMITTED:
-      return ret + ": waiting for application to be accepted by scheduler.";
-    case ACCEPTED:
-      return ret + ": waiting for AM container to be allocated, launched and"
-          + " register with RM.";
-    case RUNNING:
-      return ret + ": AM has registered with RM and started running.";
-    default:
-      return ret;
-    }
-  }
-
-  private String clairfyAppFinalStatus(FinalApplicationStatus status) {
-    if (status == FinalApplicationStatus.UNDEFINED) {
-      return "Application has not completed yet.";
-    }
-    return status.toString();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppPage.java
index 8993324..9f9b7c9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppPage.java
@@ -18,19 +18,38 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.webapp;
 
+import static org.apache.hadoop.yarn.util.StringHelper.join;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
 
+import org.apache.hadoop.yarn.server.webapp.AppBlock;
+import org.apache.hadoop.yarn.server.webapp.WebPageUtils;
 import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.YarnWebParams;
 
 public class AppPage extends RmView {
 
-  @Override protected void preHead(Page.HTML<_> html) {
+  @Override 
+  protected void preHead(Page.HTML<_> html) {
     commonPreHead(html);
-    set(DATATABLES_ID, "ResourceRequests");
+    String appId = $(YarnWebParams.APPLICATION_ID);
+    set(
+      TITLE,
+      appId.isEmpty() ? "Bad request: missing application ID" : join(
+        "Application ", $(YarnWebParams.APPLICATION_ID)));
+
+    set(DATATABLES_ID, "attempts ResourceRequests");
+    set(initID(DATATABLES, "attempts"), WebPageUtils.attemptsTableInit());
+    setTableStyles(html, "attempts", ".queue {width:6em}", ".ui {width:8em}");
+
     setTableStyles(html, "ResourceRequests");
+
+    set(YarnWebParams.WEB_UI_TYPE, YarnWebParams.RM_WEB_UI);
   }
 
-  @Override protected Class<? extends SubView> content() {
+  @Override 
+  protected Class<? extends SubView> content() {
     return AppBlock.class;
   }
 }
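
A note on the DataTables wiring above: DATATABLES_ID is set once with a
space-separated list of table ids ("attempts ResourceRequests"), and each
table can then be given its own init script via initID(...) and its own
styles via setTableStyles(...). A minimal sketch of that convention, reusing
the names from the AppPage diff (illustrative only, not additional commit
content):

    // Register both DataTables in one call, then configure each id.
    set(DATATABLES_ID, "attempts ResourceRequests");
    set(initID(DATATABLES, "attempts"), WebPageUtils.attemptsTableInit());
    setTableStyles(html, "attempts", ".queue {width:6em}", ".ui {width:8em}");
    setTableStyles(html, "ResourceRequests");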


[30/50] [abbrv] hadoop git commit: Update CHANGES.txt for YARN-2616 to fix indentation.

Posted by ji...@apache.org.
Update CHANGES.txt for YARN-2616 to fix indentation.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/22b1f538
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/22b1f538
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/22b1f538

Branch: refs/heads/HDFS-7285
Commit: 22b1f538fcf5d2e470e87845cf0b217a1289e873
Parents: fed8745
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Fri Mar 6 00:53:03 2015 +0900
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:25 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/22b1f538/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3ea5501..5f61462 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -89,8 +89,8 @@ Release 2.7.0 - UNRELEASED
     YARN-2217. [YARN-1492] Shared cache client side changes. 
     (Chris Trezzo via kasha)
 
-     YARN-2616 [YARN-913] Add CLI client to the registry to list, view
-     and manipulate entries. (Akshay Radia via stevel)
+    YARN-2616 [YARN-913] Add CLI client to the registry to list, view
+    and manipulate entries. (Akshay Radia via stevel)
 
     YARN-2994. Document work-preserving RM restart. (Jian He via ozawa)
 


[34/50] [abbrv] hadoop git commit: HDFS-7818. OffsetParam should return the default value instead of throwing NPE when the value is unspecified. Contributed by Eric Payne.

Posted by ji...@apache.org.
HDFS-7818. OffsetParam should return the default value instead of throwing NPE when the value is unspecified. Contributed by Eric Payne.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fcae1207
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fcae1207
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fcae1207

Branch: refs/heads/HDFS-7285
Commit: fcae12071f5b77efdbf1251113f7483f3a694a8d
Parents: 1752b65
Author: Haohui Mai <wh...@apache.org>
Authored: Fri Mar 6 14:26:23 2015 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:26 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt      |  3 +++
 .../datanode/web/webhdfs/ParameterParser.java    |  2 +-
 .../hadoop/hdfs/web/resources/OffsetParam.java   |  5 +++++
 .../web/webhdfs/TestParameterParser.java         | 19 +++++++++++++++++++
 4 files changed, 28 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fcae1207/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e622a57..b443902 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1107,6 +1107,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7885. Datanode should not trust the generation stamp provided by
     client. (Tsz Wo Nicholas Sze via jing9)
 
+    HDFS-7818. OffsetParam should return the default value instead of throwing
+    NPE when the value is unspecified. (Eric Payne via wheat9)
+
     BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
       HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fcae1207/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
index 5749504..2baafe8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
@@ -62,7 +62,7 @@ class ParameterParser {
   }
 
   long offset() {
-    return new OffsetParam(param(OffsetParam.NAME)).getValue();
+    return new OffsetParam(param(OffsetParam.NAME)).getOffset();
   }
 
   String namenodeId() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fcae1207/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java
index 6973787..6d88703 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java
@@ -46,4 +46,9 @@ public class OffsetParam extends LongParam {
   public String getName() {
     return NAME;
   }
+
+  public Long getOffset() {
+    Long offset = getValue();
+    return (offset == null) ? Long.valueOf(0) : offset;
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fcae1207/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
index 6a6c5d0..8aee1d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.web.resources.DelegationParam;
 import org.apache.hadoop.hdfs.web.resources.NamenodeAddressParam;
+import org.apache.hadoop.hdfs.web.resources.OffsetParam;
 import org.apache.hadoop.security.token.Token;
 import org.junit.Assert;
 import org.junit.Test;
@@ -65,4 +66,22 @@ public class TestParameterParser {
     ParameterParser testParser = new ParameterParser(decoder, conf);
     Assert.assertEquals(EXPECTED_PATH, testParser.path());
   }
+
+  @Test
+  public void testOffset() throws IOException {
+    final long X = 42;
+
+    long offset = new OffsetParam(Long.toString(X)).getOffset();
+    Assert.assertEquals("OffsetParam: ", X, offset);
+
+    offset = new OffsetParam((String) null).getOffset();
+    Assert.assertEquals("OffsetParam with null should have defaulted to 0", 0, offset);
+
+    try {
+      offset = new OffsetParam("abc").getValue();
+      Assert.fail("OffsetParam with nondigit value should have thrown IllegalArgumentException");
+    } catch (IllegalArgumentException iae) {
+      // Ignore
+    }
+  }
 }
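
The fix follows the usual defaulting-accessor pattern for optional webhdfs
query parameters: getValue() returns a possibly-null Long when the parameter
is absent, so primitive call sites must not auto-unbox it directly. A
minimal sketch mirroring the new test above (assumes the same OffsetParam
import; not additional commit content):

    // Absent "offset" parameter: getValue() returns null, so the old
    //   long offset = new OffsetParam((String) null).getValue();  // NPE
    // call path is replaced by the defaulting accessor:
    long offset = new OffsetParam((String) null).getOffset();      // 0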


[28/50] [abbrv] hadoop git commit: HADOOP-11674. oneByteBuf in CryptoInputStream and CryptoOutputStream should be non static. (Sean Busbey via yliu)

Posted by ji...@apache.org.
HADOOP-11674. oneByteBuf in CryptoInputStream and CryptoOutputStream should be non static. (Sean Busbey via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ffa5622d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ffa5622d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ffa5622d

Branch: refs/heads/HDFS-7285
Commit: ffa5622df38d2c0dac7e9f22c953f6a905e8ece7
Parents: f5632a4
Author: yliu <yl...@apache.org>
Authored: Thu Mar 5 06:38:45 2015 +0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:25 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                   | 3 +++
 .../src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java | 2 +-
 .../main/java/org/apache/hadoop/crypto/CryptoOutputStream.java    | 2 +-
 3 files changed, 5 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ffa5622d/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 92af646..65c6d85 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1041,6 +1041,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11605. FilterFileSystem#create with ChecksumOpt should propagate it
     to wrapped FS. (gera)
 
+    HADOOP-11674. oneByteBuf in CryptoInputStream and CryptoOutputStream
+    should be non static. (Sean Busbey via yliu)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ffa5622d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
index f3e5b90..2e87f91 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
@@ -60,7 +60,7 @@ public class CryptoInputStream extends FilterInputStream implements
     Seekable, PositionedReadable, ByteBufferReadable, HasFileDescriptor, 
     CanSetDropBehind, CanSetReadahead, HasEnhancedByteBufferAccess, 
     ReadableByteChannel {
-  private static final byte[] oneByteBuf = new byte[1];
+  private final byte[] oneByteBuf = new byte[1];
   private final CryptoCodec codec;
   private final Decryptor decryptor;
   private final int bufferSize;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ffa5622d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
index 876ffd6..f1ea0fc 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
@@ -45,7 +45,7 @@ import com.google.common.base.Preconditions;
 @InterfaceStability.Evolving
 public class CryptoOutputStream extends FilterOutputStream implements 
     Syncable, CanSetDropBehind {
-  private static final byte[] oneByteBuf = new byte[1];
+  private final byte[] oneByteBuf = new byte[1];
   private final CryptoCodec codec;
   private final Encryptor encryptor;
   private final int bufferSize;
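
Why the static mattered: the one-byte buffer backs the single-byte read()
and write(int) paths, so a buffer shared across every open stream can be
clobbered when two streams are used concurrently. A sketch of the read path
such a buffer typically serves (assumed shape, based on the common
FilterInputStream delegation pattern rather than the full CryptoInputStream
source):

    // Per-instance scratch buffer; as a static field it would be shared
    // by all open streams and raced on under concurrent use.
    private final byte[] oneByteBuf = new byte[1];

    @Override
    public int read() throws IOException {
      int n = read(oneByteBuf, 0, 1);              // delegate to bulk read
      return (n == -1) ? -1 : (oneByteBuf[0] & 0xff);
    }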


[12/50] [abbrv] hadoop git commit: HDFS-7879. hdfs.dll does not export functions of the public libhdfs API. Contributed by Chris Nauroth.

Posted by ji...@apache.org.
HDFS-7879. hdfs.dll does not export functions of the public libhdfs API. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aca0abef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aca0abef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aca0abef

Branch: refs/heads/HDFS-7285
Commit: aca0abefea5b6bd5249d0ee35875a598c1ed1e7c
Parents: 871bd4e
Author: Haohui Mai <wh...@apache.org>
Authored: Wed Mar 4 09:17:21 2015 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:23 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +
 .../hadoop-hdfs/src/CMakeLists.txt              | 23 +++--
 .../hadoop-hdfs/src/main/native/libhdfs/hdfs.h  | 92 +++++++++++++++++++-
 3 files changed, 111 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aca0abef/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2037973..62006d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1086,6 +1086,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7869. Inconsistency in the return information while performing rolling
     upgrade ( J.Andreina via vinayakumarb )
 
+    HDFS-7879. hdfs.dll does not export functions of the public libhdfs API.
+    (Chris Nauroth via wheat9)
+
     BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
       HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aca0abef/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
index aceeac1..563727b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
@@ -27,7 +27,15 @@ include(../../../hadoop-common-project/hadoop-common/src/JNIFlags.cmake NO_POLIC
 function(add_dual_library LIBNAME)
     add_library(${LIBNAME} SHARED ${ARGN})
     add_library(${LIBNAME}_static STATIC ${ARGN})
-    set_target_properties(${LIBNAME}_static PROPERTIES OUTPUT_NAME ${LIBNAME})
+    # Linux builds traditionally ship a libhdfs.a (static linking) and libhdfs.so
+    # (dynamic linking).  On Windows, we cannot use the same base name for both
+    # static and dynamic, because Windows does not use distinct file extensions
+    # for a statically linked library vs. a DLL import library.  Both use the
+    # .lib extension.  On Windows, we'll build the static library as
+    # hdfs_static.lib.
+    if (NOT WIN32)
+        set_target_properties(${LIBNAME}_static PROPERTIES OUTPUT_NAME ${LIBNAME})
+    endif (NOT WIN32)
 endfunction(add_dual_library)
 
 # Link both a static and a dynamic target against some libraries
@@ -105,11 +113,14 @@ else (WIN32)
     set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -Wall -O2")
     set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_REENTRANT -D_GNU_SOURCE")
     set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64")
+    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=hidden")
     set(OS_DIR main/native/libhdfs/os/posix)
     set(OS_LINK_LIBRARIES pthread)
     set(OUT_DIR target/usr/local/lib)
 endif (WIN32)
 
+add_definitions(-DLIBHDFS_DLL_EXPORT)
+
 include_directories(
     ${GENERATED_JAVAH}
     ${CMAKE_CURRENT_SOURCE_DIR}
@@ -150,7 +161,7 @@ add_executable(test_libhdfs_ops
     main/native/libhdfs/test/test_libhdfs_ops.c
 )
 target_link_libraries(test_libhdfs_ops
-    hdfs
+    hdfs_static
     ${JAVA_JVM_LIBRARY}
 )
 
@@ -158,7 +169,7 @@ add_executable(test_libhdfs_read
     main/native/libhdfs/test/test_libhdfs_read.c
 )
 target_link_libraries(test_libhdfs_read
-    hdfs
+    hdfs_static
     ${JAVA_JVM_LIBRARY}
 )
 
@@ -166,7 +177,7 @@ add_executable(test_libhdfs_write
     main/native/libhdfs/test/test_libhdfs_write.c
 )
 target_link_libraries(test_libhdfs_write
-    hdfs
+    hdfs_static
     ${JAVA_JVM_LIBRARY}
 )
 
@@ -196,7 +207,7 @@ add_executable(test_libhdfs_threaded
     ${OS_DIR}/thread.c
 )
 target_link_libraries(test_libhdfs_threaded
-    hdfs
+    hdfs_static
     native_mini_dfs
     ${OS_LINK_LIBRARIES}
 )
@@ -206,7 +217,7 @@ add_executable(test_libhdfs_zerocopy
     main/native/libhdfs/test/test_libhdfs_zerocopy.c
 )
 target_link_libraries(test_libhdfs_zerocopy
-    hdfs
+    hdfs_static
     native_mini_dfs
     ${OS_LINK_LIBRARIES}
 )

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aca0abef/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h
index 3406d6b..64889ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h
@@ -24,6 +24,30 @@
 #include <stdint.h> /* for uint64_t, etc. */
 #include <time.h> /* for time_t */
 
+/*
+ * Support export of DLL symbols during libhdfs build, and import of DLL symbols
+ * during client application build.  A client application may optionally define
+ * symbol LIBHDFS_DLL_IMPORT in its build.  This is not strictly required, but
+ * the compiler can produce more efficient code with it.
+ */
+#ifdef WIN32
+    #ifdef LIBHDFS_DLL_EXPORT
+        #define LIBHDFS_EXTERNAL __declspec(dllexport)
+    #elif LIBHDFS_DLL_IMPORT
+        #define LIBHDFS_EXTERNAL __declspec(dllimport)
+    #else
+        #define LIBHDFS_EXTERNAL
+    #endif
+#else
+    #ifdef LIBHDFS_DLL_EXPORT
+        #define LIBHDFS_EXTERNAL __attribute__((visibility("default")))
+    #elif LIBHDFS_DLL_IMPORT
+        #define LIBHDFS_EXTERNAL __attribute__((visibility("default")))
+    #else
+        #define LIBHDFS_EXTERNAL
+    #endif
+#endif
+
 #ifndef O_RDONLY
 #define O_RDONLY 1
 #endif
@@ -77,6 +101,7 @@ extern  "C" {
      * @param file     The HDFS file
      * @return         1 if the file is open for read; 0 otherwise
      */
+    LIBHDFS_EXTERNAL
     int hdfsFileIsOpenForRead(hdfsFile file);
 
     /**
@@ -85,6 +110,7 @@ extern  "C" {
      * @param file     The HDFS file
      * @return         1 if the file is open for write; 0 otherwise
      */
+    LIBHDFS_EXTERNAL
     int hdfsFileIsOpenForWrite(hdfsFile file);
 
     struct hdfsReadStatistics {
@@ -107,6 +133,7 @@ extern  "C" {
      *                 ENOTSUP.  webhdfs, LocalFilesystem, and so forth may
      *                 not support read statistics.
      */
+    LIBHDFS_EXTERNAL
     int hdfsFileGetReadStatistics(hdfsFile file,
                                   struct hdfsReadStatistics **stats);
 
@@ -115,6 +142,7 @@ extern  "C" {
      *
      * @return the number of remote bytes read.
      */
+    LIBHDFS_EXTERNAL
     int64_t hdfsReadStatisticsGetRemoteBytesRead(
                             const struct hdfsReadStatistics *stats);
 
@@ -129,6 +157,7 @@ extern  "C" {
      *                  statistics.
      *                  Errno will also be set to this code on failure.
      */
+    LIBHDFS_EXTERNAL
     int hdfsFileClearReadStatistics(hdfsFile file);
 
     /**
@@ -136,6 +165,7 @@ extern  "C" {
      *
      * @param stats    The HDFS read statistics to free.
      */
+    LIBHDFS_EXTERNAL
     void hdfsFileFreeReadStatistics(struct hdfsReadStatistics *stats);
 
     /** 
@@ -147,6 +177,7 @@ extern  "C" {
      * @return Returns a handle to the filesystem or NULL on error.
      * @deprecated Use hdfsBuilderConnect instead. 
      */
+     LIBHDFS_EXTERNAL
      hdfsFS hdfsConnectAsUser(const char* nn, tPort port, const char *user);
 
     /** 
@@ -157,6 +188,7 @@ extern  "C" {
      * @return Returns a handle to the filesystem or NULL on error.
      * @deprecated Use hdfsBuilderConnect instead. 
      */
+     LIBHDFS_EXTERNAL
      hdfsFS hdfsConnect(const char* nn, tPort port);
 
     /** 
@@ -170,6 +202,7 @@ extern  "C" {
      * @return       Returns a handle to the filesystem or NULL on error.
      * @deprecated   Use hdfsBuilderConnect instead. 
      */
+     LIBHDFS_EXTERNAL
      hdfsFS hdfsConnectAsUserNewInstance(const char* nn, tPort port, const char *user );
 
     /** 
@@ -182,6 +215,7 @@ extern  "C" {
      * @return       Returns a handle to the filesystem or NULL on error.
      * @deprecated   Use hdfsBuilderConnect instead. 
      */
+     LIBHDFS_EXTERNAL
      hdfsFS hdfsConnectNewInstance(const char* nn, tPort port);
 
     /** 
@@ -196,6 +230,7 @@ extern  "C" {
      * @param bld    The HDFS builder
      * @return       Returns a handle to the filesystem, or NULL on error.
      */
+     LIBHDFS_EXTERNAL
      hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld);
 
     /**
@@ -203,6 +238,7 @@ extern  "C" {
      *
      * @return The HDFS builder, or NULL on error.
      */
+    LIBHDFS_EXTERNAL
     struct hdfsBuilder *hdfsNewBuilder(void);
 
     /**
@@ -211,6 +247,7 @@ extern  "C" {
      *
      * @param bld The HDFS builder
      */
+    LIBHDFS_EXTERNAL
     void hdfsBuilderSetForceNewInstance(struct hdfsBuilder *bld);
 
     /**
@@ -234,6 +271,7 @@ extern  "C" {
      *             hdfsBuilderSetNameNodePort.  However, you must not pass the
      *             port in two different ways.
      */
+    LIBHDFS_EXTERNAL
     void hdfsBuilderSetNameNode(struct hdfsBuilder *bld, const char *nn);
 
     /**
@@ -242,6 +280,7 @@ extern  "C" {
      * @param bld The HDFS builder
      * @param port The port.
      */
+    LIBHDFS_EXTERNAL
     void hdfsBuilderSetNameNodePort(struct hdfsBuilder *bld, tPort port);
 
     /**
@@ -250,6 +289,7 @@ extern  "C" {
      * @param bld The HDFS builder
      * @param userName The user name.  The string will be shallow-copied.
      */
+    LIBHDFS_EXTERNAL
     void hdfsBuilderSetUserName(struct hdfsBuilder *bld, const char *userName);
 
     /**
@@ -260,6 +300,7 @@ extern  "C" {
      * @param kerbTicketCachePath The Kerberos ticket cache path.  The string
      *                            will be shallow-copied.
      */
+    LIBHDFS_EXTERNAL
     void hdfsBuilderSetKerbTicketCachePath(struct hdfsBuilder *bld,
                                    const char *kerbTicketCachePath);
 
@@ -271,6 +312,7 @@ extern  "C" {
      *
      * @param bld The HDFS builder
      */
+    LIBHDFS_EXTERNAL
     void hdfsFreeBuilder(struct hdfsBuilder *bld);
 
     /**
@@ -284,6 +326,7 @@ extern  "C" {
      *
      * @return         0 on success; nonzero error code otherwise.
      */
+    LIBHDFS_EXTERNAL
     int hdfsBuilderConfSetStr(struct hdfsBuilder *bld, const char *key,
                               const char *val);
 
@@ -298,6 +341,7 @@ extern  "C" {
      * @return         0 on success; nonzero error code otherwise.
      *                 Failure to find the key is not an error.
      */
+    LIBHDFS_EXTERNAL
     int hdfsConfGetStr(const char *key, char **val);
 
     /**
@@ -310,6 +354,7 @@ extern  "C" {
      * @return         0 on success; nonzero error code otherwise.
      *                 Failure to find the key is not an error.
      */
+    LIBHDFS_EXTERNAL
     int hdfsConfGetInt(const char *key, int32_t *val);
 
     /**
@@ -317,6 +362,7 @@ extern  "C" {
      *
      * @param val      A configuration string obtained from hdfsConfGetStr
      */
+    LIBHDFS_EXTERNAL
     void hdfsConfStrFree(char *val);
 
     /** 
@@ -327,6 +373,7 @@ extern  "C" {
      *         Even if there is an error, the resources associated with the
      *         hdfsFS will be freed.
      */
+    LIBHDFS_EXTERNAL
     int hdfsDisconnect(hdfsFS fs);
         
 
@@ -344,6 +391,7 @@ extern  "C" {
      * default configured values.
      * @return Returns the handle to the open file or NULL on error.
      */
+    LIBHDFS_EXTERNAL
     hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
                           int bufferSize, short replication, tSize blocksize);
 
@@ -355,6 +403,7 @@ extern  "C" {
      *              ENOTSUP if the file does not support unbuffering
      *              Errno will also be set to this value.
      */
+    LIBHDFS_EXTERNAL
     int hdfsUnbufferFile(hdfsFile file);
 
     /** 
@@ -367,6 +416,7 @@ extern  "C" {
      *         be freed at the end of this call, even if there was an I/O
      *         error.
      */
+    LIBHDFS_EXTERNAL
     int hdfsCloseFile(hdfsFS fs, hdfsFile file);
 
 
@@ -376,6 +426,7 @@ extern  "C" {
      * @param path The path to look for
      * @return Returns 0 on success, -1 on error.  
      */
+    LIBHDFS_EXTERNAL
     int hdfsExists(hdfsFS fs, const char *path);
 
 
@@ -387,6 +438,7 @@ extern  "C" {
      * @param desiredPos Offset into the file to seek into.
      * @return Returns 0 on success, -1 on error.  
      */
+    LIBHDFS_EXTERNAL
     int hdfsSeek(hdfsFS fs, hdfsFile file, tOffset desiredPos); 
 
 
@@ -396,6 +448,7 @@ extern  "C" {
      * @param file The file handle.
      * @return Current offset, -1 on error.
      */
+    LIBHDFS_EXTERNAL
     tOffset hdfsTell(hdfsFS fs, hdfsFile file);
 
 
@@ -413,6 +466,7 @@ extern  "C" {
      *              and set errno to EINTR if data is temporarily unavailable,
      *              but we are not yet at the end of the file.
      */
+    LIBHDFS_EXTERNAL
     tSize hdfsRead(hdfsFS fs, hdfsFile file, void* buffer, tSize length);
 
     /** 
@@ -424,6 +478,7 @@ extern  "C" {
      * @param length The length of the buffer.
      * @return      See hdfsRead
      */
+    LIBHDFS_EXTERNAL
     tSize hdfsPread(hdfsFS fs, hdfsFile file, tOffset position,
                     void* buffer, tSize length);
 
@@ -436,6 +491,7 @@ extern  "C" {
      * @param length The no. of bytes to write. 
      * @return Returns the number of bytes written, -1 on error.
      */
+    LIBHDFS_EXTERNAL
     tSize hdfsWrite(hdfsFS fs, hdfsFile file, const void* buffer,
                     tSize length);
 
@@ -446,6 +502,7 @@ extern  "C" {
      * @param file The file handle.
      * @return Returns 0 on success, -1 on error. 
      */
+    LIBHDFS_EXTERNAL
     int hdfsFlush(hdfsFS fs, hdfsFile file);
 
 
@@ -456,6 +513,7 @@ extern  "C" {
      * @param file file handle
      * @return 0 on success, -1 on error and sets errno
      */
+    LIBHDFS_EXTERNAL
     int hdfsHFlush(hdfsFS fs, hdfsFile file);
 
 
@@ -467,6 +525,7 @@ extern  "C" {
      * @param file file handle
      * @return 0 on success, -1 on error and sets errno
      */
+    LIBHDFS_EXTERNAL
     int hdfsHSync(hdfsFS fs, hdfsFile file);
 
 
@@ -477,6 +536,7 @@ extern  "C" {
      * @param file The file handle.
      * @return Returns available bytes; -1 on error. 
      */
+    LIBHDFS_EXTERNAL
     int hdfsAvailable(hdfsFS fs, hdfsFile file);
 
 
@@ -488,6 +548,7 @@ extern  "C" {
      * @param dst The path of destination file. 
      * @return Returns 0 on success, -1 on error. 
      */
+    LIBHDFS_EXTERNAL
     int hdfsCopy(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst);
 
 
@@ -499,6 +560,7 @@ extern  "C" {
      * @param dst The path of destination file. 
      * @return Returns 0 on success, -1 on error. 
      */
+    LIBHDFS_EXTERNAL
     int hdfsMove(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst);
 
 
@@ -511,6 +573,7 @@ extern  "C" {
      * case of a file the recursive argument is irrelevant.
      * @return Returns 0 on success, -1 on error. 
      */
+    LIBHDFS_EXTERNAL
     int hdfsDelete(hdfsFS fs, const char* path, int recursive);
 
     /**
@@ -520,6 +583,7 @@ extern  "C" {
      * @param newPath The path of the destination file. 
      * @return Returns 0 on success, -1 on error. 
      */
+    LIBHDFS_EXTERNAL
     int hdfsRename(hdfsFS fs, const char* oldPath, const char* newPath);
 
 
@@ -531,6 +595,7 @@ extern  "C" {
      * @param bufferSize The length of user-buffer.
      * @return Returns buffer, NULL on error.
      */
+    LIBHDFS_EXTERNAL
     char* hdfsGetWorkingDirectory(hdfsFS fs, char *buffer, size_t bufferSize);
 
 
@@ -541,6 +606,7 @@ extern  "C" {
      * @param path The path of the new 'cwd'. 
      * @return Returns 0 on success, -1 on error. 
      */
+    LIBHDFS_EXTERNAL
     int hdfsSetWorkingDirectory(hdfsFS fs, const char* path);
 
 
@@ -551,6 +617,7 @@ extern  "C" {
      * @param path The path of the directory. 
      * @return Returns 0 on success, -1 on error. 
      */
+    LIBHDFS_EXTERNAL
     int hdfsCreateDirectory(hdfsFS fs, const char* path);
 
 
@@ -561,6 +628,7 @@ extern  "C" {
      * @param path The path of the file. 
      * @return Returns 0 on success, -1 on error. 
      */
+    LIBHDFS_EXTERNAL
     int hdfsSetReplication(hdfsFS fs, const char* path, int16_t replication);
 
 
@@ -590,6 +658,7 @@ extern  "C" {
      * @return Returns a dynamically-allocated array of hdfsFileInfo
      * objects; NULL on error.
      */
+    LIBHDFS_EXTERNAL
     hdfsFileInfo *hdfsListDirectory(hdfsFS fs, const char* path,
                                     int *numEntries);
 
@@ -603,6 +672,7 @@ extern  "C" {
      * @return Returns a dynamically-allocated hdfsFileInfo object;
      * NULL on error.
      */
+    LIBHDFS_EXTERNAL
     hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs, const char* path);
 
 
@@ -612,6 +682,7 @@ extern  "C" {
      * objects.
      * @param numEntries The size of the array.
      */
+    LIBHDFS_EXTERNAL
     void hdfsFreeFileInfo(hdfsFileInfo *hdfsFileInfo, int numEntries);
 
     /**
@@ -620,6 +691,7 @@ extern  "C" {
      * @return -1 if there was an error (errno will be set), 0 if the file is
      *         not encrypted, 1 if the file is encrypted.
      */
+    LIBHDFS_EXTERNAL
     int hdfsFileIsEncrypted(hdfsFileInfo *hdfsFileInfo);
 
 
@@ -635,6 +707,7 @@ extern  "C" {
      * @return Returns a dynamically-allocated 2-d array of blocks-hosts;
      * NULL on error.
      */
+    LIBHDFS_EXTERNAL
     char*** hdfsGetHosts(hdfsFS fs, const char* path, 
             tOffset start, tOffset length);
 
@@ -645,6 +718,7 @@ extern  "C" {
      * objects.
      * @param numEntries The size of the array.
      */
+    LIBHDFS_EXTERNAL
     void hdfsFreeHosts(char ***blockHosts);
 
 
@@ -656,6 +730,7 @@ extern  "C" {
      *
      * @return              Returns the default blocksize, or -1 on error.
      */
+    LIBHDFS_EXTERNAL
     tOffset hdfsGetDefaultBlockSize(hdfsFS fs);
 
 
@@ -669,6 +744,7 @@ extern  "C" {
      *
      * @return              Returns the default blocksize, or -1 on error.
      */
+    LIBHDFS_EXTERNAL
     tOffset hdfsGetDefaultBlockSizeAtPath(hdfsFS fs, const char *path);
 
 
@@ -677,6 +753,7 @@ extern  "C" {
      * @param fs The configured filesystem handle.
      * @return Returns the raw-capacity; -1 on error. 
      */
+    LIBHDFS_EXTERNAL
     tOffset hdfsGetCapacity(hdfsFS fs);
 
 
@@ -685,6 +762,7 @@ extern  "C" {
      * @param fs The configured filesystem handle.
      * @return Returns the total-size; -1 on error. 
      */
+    LIBHDFS_EXTERNAL
     tOffset hdfsGetUsed(hdfsFS fs);
 
     /** 
@@ -696,6 +774,7 @@ extern  "C" {
      * @param group         Group string.  Set to NULL for 'no change'
      * @return              0 on success else -1
      */
+    LIBHDFS_EXTERNAL
     int hdfsChown(hdfsFS fs, const char* path, const char *owner,
                   const char *group);
 
@@ -706,7 +785,8 @@ extern  "C" {
      * @param mode the bitmask to set it to
      * @return 0 on success else -1
      */
-      int hdfsChmod(hdfsFS fs, const char* path, short mode);
+    LIBHDFS_EXTERNAL
+    int hdfsChmod(hdfsFS fs, const char* path, short mode);
 
     /** 
      * hdfsUtime
@@ -716,6 +796,7 @@ extern  "C" {
      * @param atime new access time or -1 for no change
      * @return 0 on success else -1
      */
+    LIBHDFS_EXTERNAL
     int hdfsUtime(hdfsFS fs, const char* path, tTime mtime, tTime atime);
 
     /**
@@ -728,6 +809,7 @@ extern  "C" {
      *                    not be allocated.  If NULL is returned, errno will
      *                    contain the error number.
      */
+    LIBHDFS_EXTERNAL
     struct hadoopRzOptions *hadoopRzOptionsAlloc(void);
 
     /**
@@ -739,6 +821,7 @@ extern  "C" {
      *
      * @return            0 on success; -1 plus errno on failure.
      */
+    LIBHDFS_EXTERNAL
     int hadoopRzOptionsSetSkipChecksum(
             struct hadoopRzOptions *opts, int skip);
 
@@ -756,6 +839,7 @@ extern  "C" {
      *                    instantiated;
      *                    -1 plus errno otherwise.
      */
+    LIBHDFS_EXTERNAL
     int hadoopRzOptionsSetByteBufferPool(
             struct hadoopRzOptions *opts, const char *className);
 
@@ -765,6 +849,7 @@ extern  "C" {
      * @param opts        The options structure to free.
      *                    Any associated ByteBufferPool will also be freed.
      */
+    LIBHDFS_EXTERNAL
     void hadoopRzOptionsFree(struct hadoopRzOptions *opts);
 
     /**
@@ -790,6 +875,7 @@ extern  "C" {
      *                   zero-copy read, and there was no ByteBufferPool
      *                   supplied.
      */
+    LIBHDFS_EXTERNAL
     struct hadoopRzBuffer* hadoopReadZero(hdfsFile file,
             struct hadoopRzOptions *opts, int32_t maxLength);
 
@@ -799,6 +885,7 @@ extern  "C" {
      * @param buffer     a buffer returned from readZero.
      * @return           the length of the buffer.
      */
+    LIBHDFS_EXTERNAL
     int32_t hadoopRzBufferLength(const struct hadoopRzBuffer *buffer);
 
     /**
@@ -811,6 +898,7 @@ extern  "C" {
      * @return           a pointer to the start of the buffer.  This will be
      *                   NULL when end-of-file has been reached.
      */
+    LIBHDFS_EXTERNAL
     const void *hadoopRzBufferGet(const struct hadoopRzBuffer *buffer);
 
     /**
@@ -820,12 +908,14 @@ extern  "C" {
      *                   the same stream you called hadoopReadZero on.
      * @param buffer     The buffer to release.
      */
+    LIBHDFS_EXTERNAL
     void hadoopRzBufferFree(hdfsFile file, struct hadoopRzBuffer *buffer);
 
 #ifdef __cplusplus
 }
 #endif
 
+#undef LIBHDFS_EXTERNAL
 #endif /*LIBHDFS_HDFS_H*/
 
 /**


[29/50] [abbrv] hadoop git commit: YARN-3249. Add a 'kill application' button to Resource Manager's Web UI. Contributed by Ryu Kobayashi.

Posted by ji...@apache.org.
YARN-3249. Add a 'kill application' button to Resource Manager's Web UI. Contributed by Ryu Kobayashi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fed87455
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fed87455
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fed87455

Branch: refs/heads/HDFS-7285
Commit: fed87455b03142529e9ceeded1ec942e3fd568ed
Parents: ffa5622
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Thu Mar 5 19:55:56 2015 +0900
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:25 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 ++
 .../hadoop/yarn/conf/YarnConfiguration.java     |  6 ++++
 .../server/resourcemanager/webapp/AppBlock.java | 35 ++++++++++++++++++++
 3 files changed, 44 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fed87455/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 4dd61eb..3ea5501 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -94,6 +94,9 @@ Release 2.7.0 - UNRELEASED
 
     YARN-2994. Document work-preserving RM restart. (Jian He via ozawa)
 
+    YARN-3249. Add a 'kill application' button to Resource Manager's Web UI.
+    (Ryu Kobayashi via ozawa)
+
   IMPROVEMENTS
 
     YARN-3005. [JDK7] Use switch statement for String instead of if-else

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fed87455/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index ff06eea..25b808e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -184,6 +184,12 @@ public class YarnConfiguration extends Configuration {
   public static final boolean DEFAULT_RM_SCHEDULER_USE_PORT_FOR_NODE_NAME = 
       false;
 
+  /** Enable Resource Manager webapp ui actions */
+  public static final String RM_WEBAPP_UI_ACTIONS_ENABLED =
+    RM_PREFIX + "webapp.ui-actions.enabled";
+  public static final boolean DEFAULT_RM_WEBAPP_UI_ACTIONS_ENABLED =
+    true;
+
   /** Whether the RM should enable Reservation System */
   public static final String RM_RESERVATION_SYSTEM_ENABLE = RM_PREFIX
       + "reservation-system.enable";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fed87455/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
index 45df93e..00508b8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
@@ -62,12 +63,16 @@ public class AppBlock extends HtmlBlock {
 
   private final Configuration conf;
   private final ResourceManager rm;
+  private final boolean rmWebAppUIActions;
 
   @Inject
   AppBlock(ResourceManager rm, ViewContext ctx, Configuration conf) {
     super(ctx);
     this.conf = conf;
     this.rm = rm;
+    this.rmWebAppUIActions =
+        conf.getBoolean(YarnConfiguration.RM_WEBAPP_UI_ACTIONS_ENABLED,
+                YarnConfiguration.DEFAULT_RM_WEBAPP_UI_ACTIONS_ENABLED);
   }
 
   @Override
@@ -113,6 +118,36 @@ public class AppBlock extends HtmlBlock {
 
     setTitle(join("Application ", aid));
 
+    if (rmWebAppUIActions) {
+      // Application Kill
+      html.div()
+        .button()
+          .$onclick("confirmAction()").b("Kill Application")._()
+          ._();
+
+      StringBuilder script = new StringBuilder();
+      script.append("function confirmAction() {")
+          .append(" b = confirm(\"Are you sure?\");")
+          .append(" if (b == true) {")
+          .append(" $.ajax({")
+          .append(" type: 'PUT',")
+          .append(" url: '/ws/v1/cluster/apps/").append(aid).append("/state',")
+          .append(" contentType: 'application/json',")
+          .append(" data: '{\"state\":\"KILLED\"}',")
+          .append(" dataType: 'json'")
+          .append(" }).done(function(data){")
+          .append(" setTimeout(function(){")
+          .append(" location.href = '/cluster/app/").append(aid).append("';")
+          .append(" }, 1000);")
+          .append(" }).fail(function(data){")
+          .append(" console.log(data);")
+          .append(" });")
+          .append(" }")
+          .append("}");
+
+      html.script().$type("text/javascript")._(script.toString())._();
+    }
+
     RMAppMetrics appMerics = rmApp.getRMAppMetrics();
     
     // Get attempt metrics and fields, it is possible currentAttempt of RMApp is
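
The button drives the existing RM REST endpoint, so the same action can be
triggered without the UI. A Java equivalent of the embedded $.ajax call
(the endpoint and payload are taken verbatim from the script above; the
host and port "rm-host:8088" and the HttpURLConnection plumbing are
illustrative):

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class KillApp {
      public static void main(String[] args) throws Exception {
        String aid = args[0];  // application id from the RM UI
        // PUT {"state":"KILLED"} to /ws/v1/cluster/apps/<appId>/state,
        // mirroring the JavaScript built in AppBlock above.
        URL url = new URL("http://rm-host:8088/ws/v1/cluster/apps/"
            + aid + "/state");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("PUT");
        conn.setRequestProperty("Content-Type", "application/json");
        conn.setDoOutput(true);
        try (OutputStream out = conn.getOutputStream()) {
          out.write("{\"state\":\"KILLED\"}".getBytes(StandardCharsets.UTF_8));
        }
        System.out.println("HTTP " + conn.getResponseCode());
      }
    }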


[27/50] [abbrv] hadoop git commit: MAPREDUCE-6136. MRAppMaster doesn't shutdown file systems. Contributed by Brahma Reddy Battula.

Posted by ji...@apache.org.
MAPREDUCE-6136. MRAppMaster doesn't shutdown file systems. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9f374be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9f374be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9f374be

Branch: refs/heads/HDFS-7285
Commit: b9f374be0a24678df6c7b2301df65e48b7de6629
Parents: 31d3efe
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Thu Mar 5 14:12:47 2015 +0900
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:25 2015 -0700

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt                             | 3 +++
 .../java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java     | 4 ----
 2 files changed, 3 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f374be/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 212727e..d0d8216 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -414,6 +414,9 @@ Release 2.7.0 - UNRELEASED
     MAPREDUCE-6268. Fix typo in Task Attempt API's URL. (Ryu Kobayashi
     via ozawa)
 
+    MAPREDUCE-6136. MRAppMaster doesn't shutdown file systems. (Brahma 
+    Reddy Battula via ozawa)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f374be/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
index 8d5be86..5d3ad5b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
@@ -1451,10 +1451,6 @@ public class MRAppMaster extends CompositeService {
       String jobUserName = System
           .getenv(ApplicationConstants.Environment.USER.name());
       conf.set(MRJobConfig.USER_NAME, jobUserName);
-      // Do not automatically close FileSystem objects so that in case of
-      // SIGTERM I have a chance to write out the job history. I'll be closing
-      // the objects myself.
-      conf.setBoolean("fs.automatic.close", false);
       initAndStartAppMaster(appMaster, conf, jobUserName);
     } catch (Throwable t) {
       LOG.fatal("Error starting MRAppMaster", t);
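
In other words, the fix simply stops disabling FileSystem's automatic close, so
cached FileSystem instances are again closed by the shutdown hook Hadoop
registers when fs.automatic.close is left at its default of true. A minimal
sketch of the behavior the removed override used to suppress, assuming a
vanilla Configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class AutoCloseSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // The line the patch removes would have disabled the shutdown hook:
        // conf.setBoolean("fs.automatic.close", false);
        FileSystem fs = FileSystem.get(conf);
        System.out.println(fs.exists(new Path("/")));
        // No explicit fs.close() here: with automatic close enabled, the
        // JVM shutdown hook closes every cached FileSystem on exit.
      }
    }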


[17/50] [abbrv] hadoop git commit: HDFS-1522. Combine two BLOCK_FILE_PREFIX constants into one. Contributed by Dongming Liang.

Posted by ji...@apache.org.
HDFS-1522. Combine two BLOCK_FILE_PREFIX constants into one. Contributed by Dongming Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/521a196d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/521a196d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/521a196d

Branch: refs/heads/HDFS-7285
Commit: 521a196db7c509e2738ce2e4d712cb347bfa2dca
Parents: 97adb9a
Author: Dongming Liang <do...@capitalone.com>
Authored: Wed Mar 4 17:47:05 2015 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:24 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                      | 3 +++
 .../src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java  | 4 +++-
 .../java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java | 3 +--
 .../org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java | 2 +-
 .../hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java | 3 ++-
 .../org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java     | 2 +-
 .../src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java     | 4 ++--
 .../src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java  | 3 ++-
 .../src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java | 2 +-
 .../src/test/java/org/apache/hadoop/hdfs/TestReplication.java    | 3 ++-
 .../hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java   | 2 +-
 11 files changed, 19 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/521a196d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2be1a4c..d9008d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -706,6 +706,9 @@ Release 2.7.0 - UNRELEASED
 
     HDFS-7535. Utilize Snapshot diff report for distcp. (jing9)
 
+    HDFS-1522. Combine two BLOCK_FILE_PREFIX constants into one.
+    (Dongming Liang via shv)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/521a196d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
index 628c610..ce96ac9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.FSInputChecker;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.ReadOption;
 import org.apache.hadoop.hdfs.net.Peer;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
@@ -351,7 +352,8 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
       long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
       DatanodeID datanodeID, PeerCache peerCache) {
     // Path is used only for printing block and file information in debug
-    super(new Path("/blk_" + blockId + ":" + bpid + ":of:"+ file)/*too non path-like?*/,
+    super(new Path("/" + Block.BLOCK_FILE_PREFIX + blockId +
+                    ":" + bpid + ":of:"+ file)/*too non path-like?*/,
           1, verifyChecksum,
           checksum.getChecksumSize() > 0? checksum : null, 
           checksum.getBytesPerChecksum(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/521a196d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index 754df2c..001f684 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -83,7 +83,6 @@ import java.util.concurrent.Future;
 public class DataStorage extends Storage {
 
   public final static String BLOCK_SUBDIR_PREFIX = "subdir";
-  final static String BLOCK_FILE_PREFIX = "blk_";
   final static String COPY_FILE_PREFIX = "dncp_";
   final static String STORAGE_DIR_DETACHED = "detach";
   public final static String STORAGE_DIR_RBW = "rbw";
@@ -1250,7 +1249,7 @@ public class DataStorage extends Storage {
     String[] blockNames = from.list(new java.io.FilenameFilter() {
       @Override
       public boolean accept(File dir, String name) {
-        return name.startsWith(BLOCK_FILE_PREFIX);
+        return name.startsWith(Block.BLOCK_FILE_PREFIX);
       }
     });
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/521a196d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index c7ee21e..01f967f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -633,7 +633,7 @@ public class DirectoryScanner implements Runnable {
           continue;
         }
         if (!Block.isBlockFilename(files[i])) {
-          if (isBlockMetaFile("blk_", files[i].getName())) {
+          if (isBlockMetaFile(Block.BLOCK_FILE_PREFIX, files[i].getName())) {
             long blockId = Block.getBlockId(files[i].getName());
             verifyFileLocation(files[i].getParentFile(), bpFinalizedDir,
                 blockId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/521a196d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 297a47d..744db62 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -430,7 +430,8 @@ public class FsVolumeImpl implements FsVolumeSpi {
 
     @Override
     public boolean accept(File dir, String name) {
-      return !name.endsWith(".meta") && name.startsWith("blk_");
+      return !name.endsWith(".meta") &&
+              name.startsWith(Block.BLOCK_FILE_PREFIX);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/521a196d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 3c7918f..3d5c251 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -302,7 +302,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
         out.println(sb.toString());
         sb.append(" for blockIds: \n");
         for (String blk: blocks) {
-          if(blk == null || !blk.contains("blk_")) {
+          if(blk == null || !blk.contains(Block.BLOCK_FILE_PREFIX)) {
             out.println("Incorrect blockId format: " + blk);
             continue;
           }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/521a196d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 2c1d07e..834eb32 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -2544,8 +2544,8 @@ public class MiniDFSCluster {
       return null;
     }
     for (File f : files) {
-      if (f.getName().startsWith("blk_") && f.getName().endsWith(
-          Block.METADATA_EXTENSION)) {
+      if (f.getName().startsWith(Block.BLOCK_FILE_PREFIX) &&
+              f.getName().endsWith(Block.METADATA_EXTENSION)) {
         results.add(f);
       } else if (f.isDirectory()) {
         List<File> subdirResults = getAllBlockMetadataFiles(f);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/521a196d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
index 969cdd5..f0c4c42 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClientFaultInjector;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.io.IOUtils;
 import org.junit.Before;
@@ -176,7 +177,7 @@ public class TestCrcCorruption {
       assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
       int num = 0;
       for (int idx = 0; idx < blocks.length; idx++) {
-        if (blocks[idx].getName().startsWith("blk_") &&
+        if (blocks[idx].getName().startsWith(Block.BLOCK_FILE_PREFIX) &&
             blocks[idx].getName().endsWith(".meta")) {
           num++;
           if (num % 3 == 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/521a196d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
index 7d3946a..8001bfb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
@@ -77,7 +77,7 @@ public class TestFileCorruption {
       File[] blocks = data_dir.listFiles();
       assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
       for (int idx = 0; idx < blocks.length; idx++) {
-        if (!blocks[idx].getName().startsWith("blk_")) {
+        if (!blocks[idx].getName().startsWith(Block.BLOCK_FILE_PREFIX)) {
           continue;
         }
         System.out.println("Deliberately removing file "+blocks[idx].getName());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/521a196d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
index d116f82..5351406 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.util.Time;
 import org.junit.Test;
@@ -511,7 +512,7 @@ public class TestReplication {
       String blockFile = null;
       File[] listFiles = participatedNodeDirs.listFiles();
       for (File file : listFiles) {
-        if (file.getName().startsWith("blk_")
+        if (file.getName().startsWith(Block.BLOCK_FILE_PREFIX)
             && !file.getName().endsWith("meta")) {
           blockFile = file.getName();
           for (File file1 : nonParticipatedNodeDirs) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/521a196d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index 6b9c4b1..d9ad96b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -319,7 +319,7 @@ public class TestDataNodeVolumeFailure {
   private boolean deteteBlocks(File dir) {
     File [] fileList = dir.listFiles();
     for(File f : fileList) {
-      if(f.getName().startsWith("blk_")) {
+      if(f.getName().startsWith(Block.BLOCK_FILE_PREFIX)) {
         if(!f.delete())
           return false;
         

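Every hunk above applies the same mechanical substitution: the scattered "blk_"
string literal becomes the shared Block.BLOCK_FILE_PREFIX constant. As an
illustrative sketch (not part of the patch), the common filtering idiom now
reads:

    import java.io.File;
    import java.io.FilenameFilter;
    import org.apache.hadoop.hdfs.protocol.Block;

    // Accepts block data files (blk_<id>) while skipping their .meta
    // checksum companions, using the single shared prefix constant.
    public class BlockFileFilter implements FilenameFilter {
      @Override
      public boolean accept(File dir, String name) {
        return name.startsWith(Block.BLOCK_FILE_PREFIX)
            && !name.endsWith(Block.METADATA_EXTENSION);
      }
    }
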

[25/50] [abbrv] hadoop git commit: YARN-1809. Synchronize RM and TimeLineServer Web-UIs. Contributed by Zhijie Shen and Xuan Gong

Posted by ji...@apache.org.
YARN-1809. Synchronize RM and TimeLineServer Web-UIs. Contributed by Zhijie Shen and Xuan Gong


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/70703472
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/70703472
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/70703472

Branch: refs/heads/HDFS-7285
Commit: 70703472f46b2c722616e6af8e654c0798c04814
Parents: d8bb732
Author: Jian He <ji...@apache.org>
Authored: Thu Mar 5 21:14:41 2015 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:25 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |   3 +
 .../dev-support/findbugs-exclude.xml            |   5 +-
 .../yarn/api/ApplicationBaseProtocol.java       | 355 +++++++++++++++++++
 .../yarn/api/ApplicationClientProtocol.java     | 290 +--------------
 .../yarn/api/ApplicationHistoryProtocol.java    | 303 +---------------
 .../apache/hadoop/yarn/webapp/ResponseInfo.java |   6 +-
 .../hadoop/yarn/webapp/YarnWebParams.java       |   4 +
 .../hadoop/yarn/webapp/view/HtmlBlock.java      |   2 +
 .../ApplicationHistoryClientService.java        | 185 +++++-----
 .../ApplicationHistoryManager.java              | 126 ++++++-
 .../ApplicationHistoryServer.java               |   2 +-
 .../webapp/AHSView.java                         |  28 +-
 .../webapp/AHSWebApp.java                       |  16 +-
 .../webapp/AHSWebServices.java                  |   6 +-
 .../webapp/AppAttemptPage.java                  |  15 +-
 .../webapp/AppPage.java                         |  21 +-
 .../TestApplicationHistoryClientService.java    |  12 +-
 .../webapp/TestAHSWebApp.java                   |  27 +-
 .../webapp/TestAHSWebServices.java              |  26 +-
 .../yarn/server/api/ApplicationContext.java     | 122 -------
 .../yarn/server/webapp/AppAttemptBlock.java     | 119 ++++---
 .../hadoop/yarn/server/webapp/AppBlock.java     | 274 ++++++++++++--
 .../hadoop/yarn/server/webapp/AppsBlock.java    |  53 ++-
 .../yarn/server/webapp/ContainerBlock.java      |  29 +-
 .../hadoop/yarn/server/webapp/WebPageUtils.java |  86 +++++
 .../hadoop/yarn/server/webapp/WebServices.java  |  68 +++-
 .../hadoop/yarn/server/webapp/dao/AppInfo.java  |  11 +-
 .../resourcemanager/webapp/AppAttemptPage.java  |  55 +++
 .../server/resourcemanager/webapp/AppBlock.java | 344 ------------------
 .../server/resourcemanager/webapp/AppPage.java  |  25 +-
 .../resourcemanager/webapp/AppsBlock.java       | 132 -------
 .../webapp/AppsBlockWithMetrics.java            |   1 +
 .../webapp/CapacitySchedulerPage.java           |   1 +
 .../resourcemanager/webapp/ContainerPage.java   |  44 +++
 .../webapp/DefaultSchedulerPage.java            |   1 +
 .../webapp/FairSchedulerPage.java               |  21 +-
 .../server/resourcemanager/webapp/RMWebApp.java |   5 +
 .../resourcemanager/webapp/RmController.java    |   8 +
 .../server/resourcemanager/webapp/RmView.java   |  31 +-
 .../resourcemanager/webapp/TestAppPage.java     |   8 +-
 .../resourcemanager/webapp/TestRMWebApp.java    |  48 ++-
 .../webapp/TestRMWebAppFairScheduler.java       |  14 +-
 42 files changed, 1376 insertions(+), 1556 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index dcf328f..accde78 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -360,6 +360,9 @@ Release 2.7.0 - UNRELEASED
     YARN-3122. Metrics for container's actual CPU usage. 
     (Anubhav Dhoot via kasha)
 
+    YARN-1809. Synchronize RM and TimeLineServer Web-UIs. (Zhijie Shen and
+    Xuan Gong via jianhe)
+
   OPTIMIZATIONS
 
     YARN-2990. FairScheduler's delay-scheduling always waits for node-local and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 1c3f201..a89884a 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -63,9 +63,12 @@
     <Bug pattern="BC_UNCONFIRMED_CAST" />
   </Match>
   <Match>
-    <Class name="~org\.apache\.hadoop\.yarn\.server\.resourcemanager\.rmapp\.attempt\.RMAppAttemptMetrics" />
+    <Class name="org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics" />
     <Method name="getLocalityStatistics" />
     <Bug pattern="EI_EXPOSE_REP" />
+  </Match>
+  <Match>
+    <Class name="org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics" />
     <Method name="incNumAllocatedContainers"/>
     <Bug pattern="VO_VOLATILE_INCREMENT" />
   </Match>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationBaseProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationBaseProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationBaseProtocol.java
new file mode 100644
index 0000000..2a8a283
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationBaseProtocol.java
@@ -0,0 +1,355 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Stable;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.io.retry.Idempotent;
+import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerReport;
+import org.apache.hadoop.yarn.api.records.Token;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+
+/**
+ * <p>
+ * The protocol between clients and the <code>ResourceManager</code> or
+ * <code>ApplicationHistoryServer</code> to get information on applications,
+ * application attempts and containers.
+ * </p>
+ *
+ */
+@Private
+@Unstable
+public interface ApplicationBaseProtocol {
+
+  /**
+   * <p>
+   * The interface used by clients to get a report of an Application from the
+   * <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>.
+   * </p>
+   *
+   * <p>
+   * The client, via {@link GetApplicationReportRequest} provides the
+   * {@link ApplicationId} of the application.
+   * </p>
+   *
+   * <p>
+   * In secure mode, the <code>ResourceManager</code> or
+   * <code>ApplicationHistoryServer</code> verifies access to the application,
+   * queue etc. before accepting the request.
+   * </p>
+   *
+   * <p>
+   * The <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>
+   * responds with a {@link GetApplicationReportResponse} which includes the
+   * {@link ApplicationReport} for the application.
+   * </p>
+   *
+   * <p>
+   * If the user does not have <code>VIEW_APP</code> access then the following
+   * fields in the report will be set to stubbed values:
+   * <ul>
+   * <li>host - set to "N/A"</li>
+   * <li>RPC port - set to -1</li>
+   * <li>client token - set to "N/A"</li>
+   * <li>diagnostics - set to "N/A"</li>
+   * <li>tracking URL - set to "N/A"</li>
+   * <li>original tracking URL - set to "N/A"</li>
+   * <li>resource usage report - all values are -1</li>
+   * </ul>
+   * </p>
+   *
+   * @param request
+   *          request for an application report
+   * @return application report
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Public
+  @Stable
+  @Idempotent
+  public GetApplicationReportResponse getApplicationReport(
+      GetApplicationReportRequest request) throws YarnException, IOException;
+
+  /**
+   * <p>
+   * The interface used by clients to get a report of Applications matching the
+   * filters defined by {@link GetApplicationsRequest} in the cluster from the
+   * <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>.
+   * </p>
+   *
+   * <p>
+   * The <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>
+   * responds with a {@link GetApplicationsResponse} which includes the
+   * {@link ApplicationReport} for the applications.
+   * </p>
+   *
+   * <p>
+   * If the user does not have <code>VIEW_APP</code> access for an application
+   * then the corresponding report will be filtered as described in
+   * {@link #getApplicationReport(GetApplicationReportRequest)}.
+   * </p>
+   *
+   * @param request
+   *          request for report on applications
+   * @return report on applications matching the given application types defined
+   *         in the request
+   * @throws YarnException
+   * @throws IOException
+   * @see GetApplicationsRequest
+   */
+  @Public
+  @Stable
+  @Idempotent
+  public GetApplicationsResponse
+      getApplications(GetApplicationsRequest request) throws YarnException,
+          IOException;
+
+  /**
+   * <p>
+   * The interface used by clients to get a report of an Application Attempt
+   * from the <code>ResourceManager</code> or
+   * <code>ApplicationHistoryServer</code>
+   * </p>
+   *
+   * <p>
+   * The client, via {@link GetApplicationAttemptReportRequest} provides the
+   * {@link ApplicationAttemptId} of the application attempt.
+   * </p>
+   *
+   * <p>
+   * In secure mode, the <code>ResourceManager</code> or
+   * <code>ApplicationHistoryServer</code> verifies access to the method before
+   * accepting the request.
+   * </p>
+   *
+   * <p>
+   * The <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>
+   * responds with a {@link GetApplicationAttemptReportResponse} which includes
+   * the {@link ApplicationAttemptReport} for the application attempt.
+   * </p>
+   *
+   * <p>
+   * If the user does not have <code>VIEW_APP</code> access then the following
+   * fields in the report will be set to stubbed values:
+   * <ul>
+   * <li>host</li>
+   * <li>RPC port</li>
+   * <li>client token</li>
+   * <li>diagnostics - set to "N/A"</li>
+   * <li>tracking URL</li>
+   * </ul>
+   * </p>
+   *
+   * @param request
+   *          request for an application attempt report
+   * @return application attempt report
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Public
+  @Unstable
+  @Idempotent
+  public GetApplicationAttemptReportResponse getApplicationAttemptReport(
+      GetApplicationAttemptReportRequest request) throws YarnException,
+      IOException;
+
+  /**
+   * <p>
+   * The interface used by clients to get a report of all Application attempts
+   * in the cluster from the <code>ResourceManager</code> or
+   * <code>ApplicationHistoryServer</code>
+   * </p>
+   *
+   * <p>
+   * The <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>
+   * responds with a {@link GetApplicationAttemptsResponse} which includes the
+   * {@link ApplicationAttemptReport} for all the application attempts of a
+   * specified application.
+   * </p>
+   *
+   * <p>
+   * If the user does not have <code>VIEW_APP</code> access for an application
+   * then the corresponding report will be filtered as described in
+   * {@link #getApplicationAttemptReport(GetApplicationAttemptReportRequest)}.
+   * </p>
+   *
+   * @param request
+   *          request for reports on all application attempts of an application
+   * @return reports on all application attempts of an application
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Public
+  @Unstable
+  @Idempotent
+  public GetApplicationAttemptsResponse getApplicationAttempts(
+      GetApplicationAttemptsRequest request) throws YarnException, IOException;
+
+  /**
+   * <p>
+   * The interface used by clients to get a report of a Container from the
+   * <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>
+   * </p>
+   *
+   * <p>
+   * The client, via {@link GetContainerReportRequest} provides the
+   * {@link ContainerId} of the container.
+   * </p>
+   *
+   * <p>
+   * In secure mode, the <code>ResourceManager</code> or
+   * <code>ApplicationHistoryServer</code> verifies access to the method before
+   * accepting the request.
+   * </p>
+   *
+   * <p>
+   * The <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>
+   * responds with a {@link GetContainerReportResponse} which includes the
+   * {@link ContainerReport} for the container.
+   * </p>
+   *
+   * @param request
+   *          request for a container report
+   * @return container report
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Public
+  @Unstable
+  @Idempotent
+  public GetContainerReportResponse getContainerReport(
+      GetContainerReportRequest request) throws YarnException, IOException;
+
+  /**
+   * <p>
+   * The interface used by clients to get a report of Containers for an
+   * application attempt from the <code>ResourceManager</code> or
+   * <code>ApplicationHistoryServer</code>
+   * </p>
+   *
+   * <p>
+   * The client, via {@link GetContainersRequest} provides the
+   * {@link ApplicationAttemptId} of the application attempt.
+   * </p>
+   *
+   * <p>
+   * In secure mode, the <code>ResourceManager</code> or
+   * <code>ApplicationHistoryServer</code> verifies access to the method before
+   * accepting the request.
+   * </p>
+   *
+   * <p>
+   * The <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>
+   * responds with a {@link GetContainersResponse} which includes a list of
+   * {@link ContainerReport} for all the containers of a specific application
+   * attempt.
+   * </p>
+   *
+   * @param request
+   *          request for a list of container reports of an application attempt.
+   * @return reports on all containers of an application attempt
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Public
+  @Unstable
+  @Idempotent
+  public GetContainersResponse getContainers(GetContainersRequest request)
+      throws YarnException, IOException;
+
+  /**
+   * <p>
+   * The interface used by clients to get a delegation token, enabling the
+   * containers to talk to the service using those tokens.
+   *
+   * <p>
+   * The <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>
+   * responds with the delegation {@link Token} that can be used by the client
+   * to speak to this service.
+   *
+   * @param request
+   *          request to get a delegation token for the client.
+   * @return delegation token that can be used to talk to this service
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Public
+  @Stable
+  @Idempotent
+  public GetDelegationTokenResponse getDelegationToken(
+      GetDelegationTokenRequest request) throws YarnException, IOException;
+
+  /**
+   * Renew an existing delegation {@link Token}.
+   *
+   * @param request
+   *          the delegation token to be renewed.
+   * @return the new expiry time for the delegation token.
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Private
+  @Unstable
+  @Idempotent
+  public RenewDelegationTokenResponse renewDelegationToken(
+      RenewDelegationTokenRequest request) throws YarnException, IOException;
+
+  /**
+   * Cancel an existing delegation {@link Token}.
+   *
+   * @param request
+   *          the delegation token to be cancelled.
+   * @return an empty response.
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Private
+  @Unstable
+  @Idempotent
+  public CancelDelegationTokenResponse cancelDelegationToken(
+      CancelDelegationTokenRequest request) throws YarnException, IOException;
+
+}
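
The value of this new parent interface is that report-fetching code can now be
written once against ApplicationBaseProtocol and served by either daemon:
ApplicationClientProtocol (the RM) extends it in the next hunk, and
ApplicationHistoryProtocol (the AHS) is trimmed down to the same base later in
this patch. A hypothetical helper, for illustration only:

    import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
    import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.api.records.ApplicationReport;

    public final class ReportFetcher {
      private ReportFetcher() {}

      // Works against the RM or the AHS alike, since both client protocols
      // now extend ApplicationBaseProtocol.
      public static ApplicationReport fetch(ApplicationBaseProtocol protocol,
          ApplicationId appId) throws Exception {
        GetApplicationReportRequest request =
            GetApplicationReportRequest.newInstance(appId);
        return protocol.getApplicationReport(request).getApplicationReport();
      }
    }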

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java
index b5f5cc0..0a7d415 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java
@@ -20,33 +20,17 @@ package org.apache.hadoop.yarn.api;
 
 import java.io.IOException;
 
-import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Stable;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.io.retry.Idempotent;
-import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
@@ -61,8 +45,6 @@ import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest;
@@ -71,19 +53,13 @@ import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
-import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
-import org.apache.hadoop.yarn.api.records.ContainerReport;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
-import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
 import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
 import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
@@ -96,7 +72,7 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
  */
 @Public
 @Stable
-public interface ApplicationClientProtocol {
+public interface ApplicationClientProtocol extends ApplicationBaseProtocol {
   /**
    * <p>The interface used by clients to obtain a new {@link ApplicationId} for 
    * submitting new applications.</p>
@@ -199,44 +175,6 @@ public interface ApplicationClientProtocol {
   throws YarnException, IOException;
 
   /**
-   * <p>The interface used by clients to get a report of an Application from
-   * the <code>ResourceManager</code>.</p>
-   * 
-   * <p>The client, via {@link GetApplicationReportRequest} provides the
-   * {@link ApplicationId} of the application.</p>
-   *
-   * <p> In secure mode,the <code>ResourceManager</code> verifies access to the
-   * application, queue etc. before accepting the request.</p> 
-   * 
-   * <p>The <code>ResourceManager</code> responds with a 
-   * {@link GetApplicationReportResponse} which includes the 
-   * {@link ApplicationReport} for the application.</p>
-   * 
-   * <p>If the user does not have <code>VIEW_APP</code> access then the
-   * following fields in the report will be set to stubbed values:
-   * <ul>
-   *   <li>host - set to "N/A"</li>
-   *   <li>RPC port - set to -1</li>
-   *   <li>client token - set to "N/A"</li>
-   *   <li>diagnostics - set to "N/A"</li>
-   *   <li>tracking URL - set to "N/A"</li>
-   *   <li>original tracking URL - set to "N/A"</li>
-   *   <li>resource usage report - all values are -1</li>
-   * </ul></p>
-   *
-   * @param request request for an application report
-   * @return application report 
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Public
-  @Stable
-  @Idempotent
-  public GetApplicationReportResponse getApplicationReport(
-      GetApplicationReportRequest request) 
-  throws YarnException, IOException;
-  
-  /**
    * <p>The interface used by clients to get metrics about the cluster from
    * the <code>ResourceManager</code>.</p>
    * 
@@ -256,35 +194,7 @@ public interface ApplicationClientProtocol {
   public GetClusterMetricsResponse getClusterMetrics(
       GetClusterMetricsRequest request) 
   throws YarnException, IOException;
-  
-  /**
-   * <p>The interface used by clients to get a report of Applications
-   * matching the filters defined by {@link GetApplicationsRequest}
-   * in the cluster from the <code>ResourceManager</code>.</p>
-   * 
-   * <p>The <code>ResourceManager</code> responds with a 
-   * {@link GetApplicationsResponse} which includes the
-   * {@link ApplicationReport} for the applications.</p>
-   * 
-   * <p>If the user does not have <code>VIEW_APP</code> access for an
-   * application then the corresponding report will be filtered as
-   * described in {@link #getApplicationReport(GetApplicationReportRequest)}.
-   * </p>
-   *
-   * @param request request for report on applications
-   * @return report on applications matching the given application types
-   *           defined in the request
-   * @throws YarnException
-   * @throws IOException
-   * @see GetApplicationsRequest
-   */
-  @Public
-  @Stable
-  @Idempotent
-  public GetApplicationsResponse getApplications(
-      GetApplicationsRequest request)
-  throws YarnException, IOException;
-  
+
   /**
    * <p>The interface used by clients to get a report of all nodes
    * in the cluster from the <code>ResourceManager</code>.</p>
@@ -346,57 +256,8 @@ public interface ApplicationClientProtocol {
   public GetQueueUserAclsInfoResponse getQueueUserAcls(
       GetQueueUserAclsInfoRequest request) 
   throws YarnException, IOException;
-  
-  /**
-   * <p>The interface used by clients to get delegation token, enabling the 
-   * containers to be able to talk to the service using those tokens.
-   * 
-   *  <p> The <code>ResourceManager</code> responds with the delegation
-   *  {@link Token} that can be used by the client to speak to this
-   *  service.
-   * @param request request to get a delegation token for the client.
-   * @return delegation token that can be used to talk to this service
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Public
-  @Stable
-  @Idempotent
-  public GetDelegationTokenResponse getDelegationToken(
-      GetDelegationTokenRequest request) 
-  throws YarnException, IOException;
-  
-  /**
-   * Renew an existing delegation {@link Token}.
-   * 
-   * @param request the delegation token to be renewed.
-   * @return the new expiry time for the delegation token.
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Private
-  @Unstable
-  @Idempotent
-  public RenewDelegationTokenResponse renewDelegationToken(
-      RenewDelegationTokenRequest request) throws YarnException,
-      IOException;
 
   /**
-   * Cancel an existing delegation {@link Token}.
-   * 
-   * @param request the delegation token to be cancelled.
-   * @return an empty response.
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Private
-  @Unstable
-  @Idempotent
-  public CancelDelegationTokenResponse cancelDelegationToken(
-      CancelDelegationTokenRequest request) throws YarnException,
-      IOException;
-  
-  /**
    * Move an application to a new queue.
    * 
    * @param request the application ID and the target queue
@@ -412,153 +273,6 @@ public interface ApplicationClientProtocol {
 
   /**
    * <p>
-   * The interface used by clients to get a report of an Application Attempt
-   * from the <code>ResourceManager</code> 
-   * </p>
-   * 
-   * <p>
-   * The client, via {@link GetApplicationAttemptReportRequest} provides the
-   * {@link ApplicationAttemptId} of the application attempt.
-   * </p>
-   * 
-   * <p>
-   * In secure mode,the <code>ResourceManager</code> verifies access to
-   * the method before accepting the request.
-   * </p>
-   * 
-   * <p>
-   * The <code>ResourceManager</code> responds with a
-   * {@link GetApplicationAttemptReportResponse} which includes the
-   * {@link ApplicationAttemptReport} for the application attempt.
-   * </p>
-   * 
-   * <p>
-   * If the user does not have <code>VIEW_APP</code> access then the following
-   * fields in the report will be set to stubbed values:
-   * <ul>
-   * <li>host</li>
-   * <li>RPC port</li>
-   * <li>client token</li>
-   * <li>diagnostics - set to "N/A"</li>
-   * <li>tracking URL</li>
-   * </ul>
-   * </p>
-   * 
-   * @param request
-   *          request for an application attempt report
-   * @return application attempt report
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Public
-  @Unstable
-  @Idempotent
-  public GetApplicationAttemptReportResponse getApplicationAttemptReport(
-      GetApplicationAttemptReportRequest request) throws YarnException,
-      IOException;
-
-  /**
-   * <p>
-   * The interface used by clients to get a report of all Application attempts
-   * in the cluster from the <code>ResourceManager</code>
-   * </p>
-   * 
-   * <p>
-   * The <code>ResourceManager</code> responds with a
-   * {@link GetApplicationAttemptsRequest} which includes the
-   * {@link ApplicationAttemptReport} for all the applications attempts of a
-   * specified application attempt.
-   * </p>
-   * 
-   * <p>
-   * If the user does not have <code>VIEW_APP</code> access for an application
-   * then the corresponding report will be filtered as described in
-   * {@link #getApplicationAttemptReport(GetApplicationAttemptReportRequest)}.
-   * </p>
-   * 
-   * @param request
-   *          request for reports on all application attempts of an application
-   * @return reports on all application attempts of an application
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Public
-  @Unstable
-  @Idempotent
-  public GetApplicationAttemptsResponse getApplicationAttempts(
-      GetApplicationAttemptsRequest request) throws YarnException, IOException;
-
-  /**
-   * <p>
-   * The interface used by clients to get a report of an Container from the
-   * <code>ResourceManager</code>
-   * </p>
-   * 
-   * <p>
-   * The client, via {@link GetContainerReportRequest} provides the
-   * {@link ContainerId} of the container.
-   * </p>
-   * 
-   * <p>
-   * In secure mode,the <code>ResourceManager</code> verifies access to the
-   * method before accepting the request.
-   * </p>
-   * 
-   * <p>
-   * The <code>ResourceManager</code> responds with a
-   * {@link GetContainerReportResponse} which includes the
-   * {@link ContainerReport} for the container.
-   * </p>
-   * 
-   * @param request
-   *          request for a container report
-   * @return container report
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Public
-  @Unstable
-  @Idempotent
-  public GetContainerReportResponse getContainerReport(
-      GetContainerReportRequest request) throws YarnException, IOException;
-
-  /**
-   * <p>
-   * The interface used by clients to get a report of Containers for an
-   * application attempt from the <code>ResourceManager</code>
-   * </p>
-   * 
-   * <p>
-   * The client, via {@link GetContainersRequest} provides the
-   * {@link ApplicationAttemptId} of the application attempt.
-   * </p>
-   * 
-   * <p>
-   * In secure mode,the <code>ResourceManager</code> verifies access to the
-   * method before accepting the request.
-   * </p>
-   * 
-   * <p>
-   * The <code>ResourceManager</code> responds with a
-   * {@link GetContainersResponse} which includes a list of
-   * {@link ContainerReport} for all the containers of a specific application
-   * attempt.
-   * </p>
-   * 
-   * @param request
-   *          request for a list of container reports of an application attempt.
-   * @return reports on all containers of an application attempt
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Public
-  @Unstable
-  @Idempotent
-  public GetContainersResponse getContainers(GetContainersRequest request)
-      throws YarnException, IOException;
-
-  /**
-   * <p>
    * The interface used by clients to submit a new reservation to the
    * {@code ResourceManager}.
    * </p>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationHistoryProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationHistoryProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationHistoryProtocol.java
index 0bfd2ed..fc8e885 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationHistoryProtocol.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationHistoryProtocol.java
@@ -18,37 +18,8 @@
 
 package org.apache.hadoop.yarn.api;
 
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
-import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenResponse;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ApplicationReport;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerReport;
-import org.apache.hadoop.yarn.api.records.Token;
-import org.apache.hadoop.yarn.exceptions.YarnException;
 
 /**
  * <p>
@@ -58,277 +29,5 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
  */
 @Public
 @Unstable
-public interface ApplicationHistoryProtocol {
-
-  /**
-   * <p>
-   * The interface used by clients to get a report of an Application from the
-   * <code>ResourceManager</code>.
-   * </p>
-   * 
-   * <p>
-   * The client, via {@link GetApplicationReportRequest} provides the
-   * {@link ApplicationId} of the application.
-   * </p>
-   * 
-   * <p>
-   * In secure mode,the <code>ApplicationHistoryServer</code> verifies access to
-   * the application, queue etc. before accepting the request.
-   * </p>
-   * 
-   * <p>
-   * The <code>ApplicationHistoryServer</code> responds with a
-   * {@link GetApplicationReportResponse} which includes the
-   * {@link ApplicationReport} for the application.
-   * </p>
-   * 
-   * <p>
-   * If the user does not have <code>VIEW_APP</code> access then the following
-   * fields in the report will be set to stubbed values:
-   * <ul>
-   * <li>host - set to "N/A"</li>
-   * <li>RPC port - set to -1</li>
-   * <li>client token - set to "N/A"</li>
-   * <li>diagnostics - set to "N/A"</li>
-   * <li>tracking URL - set to "N/A"</li>
-   * <li>original tracking URL - set to "N/A"</li>
-   * <li>resource usage report - all values are -1</li>
-   * </ul>
-   * </p>
-   * 
-   * @param request
-   *          request for an application report
-   * @return application report
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Public
-  @Unstable
-  public GetApplicationReportResponse getApplicationReport(
-      GetApplicationReportRequest request) throws YarnException, IOException;
-
-  /**
-   * <p>
-   * The interface used by clients to get a report of all Applications in the
-   * cluster from the <code>ApplicationHistoryServer</code>.
-   * </p>
-   * 
-   * <p>
-   * The <code>ApplicationHistoryServer</code> responds with a
-   * {@link GetApplicationsResponse} which includes a list of
-   * {@link ApplicationReport} for all the applications.
-   * </p>
-   * 
-   * <p>
-   * If the user does not have <code>VIEW_APP</code> access for an application
-   * then the corresponding report will be filtered as described in
-   * {@link #getApplicationReport(GetApplicationReportRequest)}.
-   * </p>
-   * 
-   * @param request
-   *          request for reports on all the applications
-   * @return report on applications matching the given application types defined
-   *         in the request
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Public
-  @Unstable
-  public GetApplicationsResponse
-      getApplications(GetApplicationsRequest request) throws YarnException,
-          IOException;
-
-  /**
-   * <p>
-   * The interface used by clients to get a report of an Application Attempt
-   * from the <code>ApplicationHistoryServer</code>.
-   * </p>
-   * 
-   * <p>
-   * The client, via {@link GetApplicationAttemptReportRequest} provides the
-   * {@link ApplicationAttemptId} of the application attempt.
-   * </p>
-   * 
-   * <p>
-   * In secure mode,the <code>ApplicationHistoryServer</code> verifies access to
-   * the method before accepting the request.
-   * </p>
-   * 
-   * <p>
-   * The <code>ApplicationHistoryServer</code> responds with a
-   * {@link GetApplicationAttemptReportResponse} which includes the
-   * {@link ApplicationAttemptReport} for the application attempt.
-   * </p>
-   * 
-   * <p>
-   * If the user does not have <code>VIEW_APP</code> access then the following
-   * fields in the report will be set to stubbed values:
-   * <ul>
-   * <li>host</li>
-   * <li>RPC port</li>
-   * <li>client token</li>
-   * <li>diagnostics - set to "N/A"</li>
-   * <li>tracking URL</li>
-   * </ul>
-   * </p>
-   * 
-   * @param request
-   *          request for an application attempt report
-   * @return application attempt report
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Public
-  @Unstable
-  public GetApplicationAttemptReportResponse getApplicationAttemptReport(
-      GetApplicationAttemptReportRequest request) throws YarnException,
-      IOException;
-
-  /**
-   * <p>
-   * The interface used by clients to get a report of all Application attempts
-   * in the cluster from the <code>ApplicationHistoryServer</code>.
-   * </p>
-   * 
-   * <p>
-   * The <code>ApplicationHistoryServer</code> responds with a
-   * {@link GetApplicationAttemptsRequest} which includes the
-   * {@link ApplicationAttemptReport} for all the applications attempts of a
-   * specified application attempt.
-   * </p>
-   * 
-   * <p>
-   * If the user does not have <code>VIEW_APP</code> access for an application
-   * then the corresponding report will be filtered as described in
-   * {@link #getApplicationAttemptReport(GetApplicationAttemptReportRequest)}.
-   * </p>
-   * 
-   * @param request
-   *          request for reports on all application attempts of an application
-   * @return reports on all application attempts of an application
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Public
-  @Unstable
-  public GetApplicationAttemptsResponse getApplicationAttempts(
-      GetApplicationAttemptsRequest request) throws YarnException, IOException;
-
-  /**
-   * <p>
-   * The interface used by clients to get a report of an Container from the
-   * <code>ApplicationHistoryServer</code>.
-   * </p>
-   * 
-   * <p>
-   * The client, via {@link GetContainerReportRequest} provides the
-   * {@link ContainerId} of the container.
-   * </p>
-   * 
-   * <p>
-   * In secure mode,the <code>ApplicationHistoryServer</code> verifies access to
-   * the method before accepting the request.
-   * </p>
-   * 
-   * <p>
-   * The <code>ApplicationHistoryServer</code> responds with a
-   * {@link GetContainerReportResponse} which includes the
-   * {@link ContainerReport} for the container.
-   * </p>
-   * 
-   * @param request
-   *          request for a container report
-   * @return container report
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Public
-  @Unstable
-  public GetContainerReportResponse getContainerReport(
-      GetContainerReportRequest request) throws YarnException, IOException;
-
-  /**
-   * <p>
-   * The interface used by clients to get a report of Containers for an
-   * application attempt from the <code>ApplciationHistoryServer</code>.
-   * </p>
-   * 
-   * <p>
-   * The client, via {@link GetContainersRequest} provides the
-   * {@link ApplicationAttemptId} of the application attempt.
-   * </p>
-   * 
-   * <p>
-   * In secure mode,the <code>ApplicationHistoryServer</code> verifies access to
-   * the method before accepting the request.
-   * </p>
-   * 
-   * <p>
-   * The <code>ApplicationHistoryServer</code> responds with a
-   * {@link GetContainersResponse} which includes a list of
-   * {@link ContainerReport} for all the containers of a specific application
-   * attempt.
-   * </p>
-   * 
-   * @param request
-   *          request for a list of container reports of an application attempt.
-   * @return reports on all containers of an application attempt
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Public
-  @Unstable
-  public GetContainersResponse getContainers(GetContainersRequest request)
-      throws YarnException, IOException;
-
-  /**
-   * <p>
-   * The interface used by clients to get delegation token, enabling the
-   * containers to be able to talk to the service using those tokens.
-   * </p>
-   * 
-   * <p>
-   * The <code>ApplicationHistoryServer</code> responds with the delegation
-   * token {@link Token} that can be used by the client to speak to this
-   * service.
-   * </p>
-   * 
-   * @param request
-   *          request to get a delegation token for the client.
-   * @return delegation token that can be used to talk to this service
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Public
-  @Unstable
-  public GetDelegationTokenResponse getDelegationToken(
-      GetDelegationTokenRequest request) throws YarnException, IOException;
-
-  /**
-   * Renew an existing delegation token.
-   * 
-   * @param request
-   *          the delegation token to be renewed.
-   * @return the new expiry time for the delegation token.
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Private
-  @Unstable
-  public RenewDelegationTokenResponse renewDelegationToken(
-      RenewDelegationTokenRequest request) throws YarnException, IOException;
-
-  /**
-   * Cancel an existing delegation token.
-   * 
-   * @param request
-   *          the delegation token to be cancelled.
-   * @return an empty response.
-   * @throws YarnException
-   * @throws IOException
-   */
-  @Private
-  @Unstable
-  public CancelDelegationTokenResponse cancelDelegationToken(
-      CancelDelegationTokenRequest request) throws YarnException, IOException;
+public interface ApplicationHistoryProtocol extends ApplicationBaseProtocol {
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java
index 7e836b5..b04bc5d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java
@@ -81,7 +81,11 @@ public class ResponseInfo implements Iterable<ResponseInfo.Item> {
   }
 
   public ResponseInfo _(String key, String url, Object anchor) {
-    items.add(Item.of(key, url, anchor));
+    if (url == null) {
+      items.add(Item.of(key, anchor, false));
+    } else {
+      items.add(Item.of(key, url, anchor));
+    }
     return this;
   }
 

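The practical effect of the null check above, as a minimal sketch. Only the
three-argument _ method is taken from the diff; the keys, values, and the
no-argument constructor are assumed for illustration:

    ResponseInfo info = new ResponseInfo();
    // With a real URL the anchor is rendered as a hyperlink, as before.
    info._("Tracking URL:", "http://rm:8088/proxy/app_1/", "History");
    // With a null URL the item now degrades to a plain-text entry instead
    // of producing a broken link.
    info._("Tracking URL:", null, "UNASSIGNED");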
http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnWebParams.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnWebParams.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnWebParams.java
index 62c3c7a..1200690 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnWebParams.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnWebParams.java
@@ -22,6 +22,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
 
 @InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"})
 public interface YarnWebParams {
+  static final String RM_WEB_UI = "ResourceManager";
+  static final String APP_HISTORY_WEB_UI = "ApplicationHistoryServer";
+  
   String NM_NODENAME = "nm.id";
   String APPLICATION_ID = "app.id";
   String APPLICATION_ATTEMPT_ID = "appattempt.id";
@@ -33,4 +36,5 @@ public interface YarnWebParams {
   String QUEUE_NAME = "queue.name";
   String NODE_STATE = "node.state";
   String NODE_LABEL = "node.label";
+  String WEB_UI_TYPE = "web.ui.type";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlBlock.java
index 6ee0d1c..a785c0c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlBlock.java
@@ -30,6 +30,8 @@ import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
 @InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"})
 public abstract class HtmlBlock extends TextView implements SubView {
 
+  protected static final String UNAVAILABLE = "N/A";
+
   public class Block extends Hamlet {
     Block(PrintWriter out, int level, boolean wasInline) {
       super(out, level, wasInline);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
index 8da1ea1..e64ca14 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
@@ -56,27 +56,23 @@ import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerReport;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;
-import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
-import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.server.timeline.security.authorize.TimelinePolicyProvider;
 
 import com.google.common.base.Preconditions;
 
-public class ApplicationHistoryClientService extends AbstractService {
+public class ApplicationHistoryClientService extends AbstractService implements
+    ApplicationHistoryProtocol {
   private static final Log LOG = LogFactory
     .getLog(ApplicationHistoryClientService.class);
   private ApplicationHistoryManager history;
-  private ApplicationHistoryProtocol protocolHandler;
   private Server server;
   private InetSocketAddress bindAddress;
 
   public ApplicationHistoryClientService(ApplicationHistoryManager history) {
     super("ApplicationHistoryClientService");
     this.history = history;
-    this.protocolHandler = new ApplicationHSClientProtocolHandler();
   }
 
   protected void serviceStart() throws Exception {
@@ -95,7 +91,7 @@ public class ApplicationHistoryClientService extends AbstractService {
         YarnConfiguration.TIMELINE_SERVICE_HANDLER_THREAD_COUNT);
 
     server =
-        rpc.getServer(ApplicationHistoryProtocol.class, protocolHandler,
+        rpc.getServer(ApplicationHistoryProtocol.class, this,
           address, conf, null, conf.getInt(
             YarnConfiguration.TIMELINE_SERVICE_HANDLER_THREAD_COUNT,
             YarnConfiguration.DEFAULT_TIMELINE_SERVICE_CLIENT_THREAD_COUNT));
@@ -127,11 +123,6 @@ public class ApplicationHistoryClientService extends AbstractService {
   }
 
   @Private
-  public ApplicationHistoryProtocol getClientHandler() {
-    return this.protocolHandler;
-  }
-
-  @Private
   public InetSocketAddress getBindAddress() {
     return this.bindAddress;
   }
@@ -141,109 +132,97 @@ public class ApplicationHistoryClientService extends AbstractService {
     this.server.refreshServiceAcl(configuration, policyProvider);
   }
 
-  private class ApplicationHSClientProtocolHandler implements
-      ApplicationHistoryProtocol {
-
-    @Override
-    public CancelDelegationTokenResponse cancelDelegationToken(
-        CancelDelegationTokenRequest request) throws YarnException, IOException {
-      // TODO Auto-generated method stub
-      return null;
-    }
-
-    @Override
-    public GetApplicationAttemptReportResponse getApplicationAttemptReport(
-        GetApplicationAttemptReportRequest request) throws YarnException,
-        IOException {
-      ApplicationAttemptId appAttemptId = request.getApplicationAttemptId();
-      try {
-        GetApplicationAttemptReportResponse response =
-            GetApplicationAttemptReportResponse.newInstance(history
-              .getApplicationAttempt(appAttemptId));
-        return response;
-      } catch (IOException e) {
-        String msg = "ApplicationAttempt with id '" + appAttemptId +
-            "' doesn't exist in the history store.";
-        LOG.error(msg, e);
-        throw new ApplicationAttemptNotFoundException(msg);
-      }
-    }
+  @Override
+  public CancelDelegationTokenResponse cancelDelegationToken(
+      CancelDelegationTokenRequest request) throws YarnException, IOException {
+    // TODO Auto-generated method stub
+    return null;
+  }
 
-    @Override
-    public GetApplicationAttemptsResponse getApplicationAttempts(
-        GetApplicationAttemptsRequest request) throws YarnException,
-        IOException {
-      GetApplicationAttemptsResponse response =
-          GetApplicationAttemptsResponse
-            .newInstance(new ArrayList<ApplicationAttemptReport>(history
-              .getApplicationAttempts(request.getApplicationId()).values()));
+  @Override
+  public GetApplicationAttemptReportResponse getApplicationAttemptReport(
+      GetApplicationAttemptReportRequest request) throws YarnException,
+      IOException {
+    ApplicationAttemptId appAttemptId = request.getApplicationAttemptId();
+    try {
+      GetApplicationAttemptReportResponse response =
+          GetApplicationAttemptReportResponse.newInstance(history
+            .getApplicationAttempt(appAttemptId));
       return response;
+    } catch (IOException e) {
+      LOG.error(e.getMessage(), e);
+      throw e;
     }
+  }
 
-    @Override
-    public GetApplicationReportResponse getApplicationReport(
-        GetApplicationReportRequest request) throws YarnException, IOException {
-      ApplicationId applicationId = request.getApplicationId();
-      try {
-        GetApplicationReportResponse response =
-            GetApplicationReportResponse.newInstance(history
-              .getApplication(applicationId));
-        return response;
-      } catch (IOException e) {
-        String msg = "Application with id '" + applicationId +
-            "' doesn't exist in the history store.";
-        LOG.error(msg, e);
-        throw new ApplicationNotFoundException(msg);
-      }
-    }
+  @Override
+  public GetApplicationAttemptsResponse getApplicationAttempts(
+      GetApplicationAttemptsRequest request) throws YarnException, IOException {
+    GetApplicationAttemptsResponse response =
+        GetApplicationAttemptsResponse
+          .newInstance(new ArrayList<ApplicationAttemptReport>(history
+            .getApplicationAttempts(request.getApplicationId()).values()));
+    return response;
+  }
 
-    @Override
-    public GetApplicationsResponse getApplications(
-        GetApplicationsRequest request) throws YarnException, IOException {
-      GetApplicationsResponse response =
-          GetApplicationsResponse.newInstance(new ArrayList<ApplicationReport>(
-            history.getAllApplications().values()));
+  @Override
+  public GetApplicationReportResponse getApplicationReport(
+      GetApplicationReportRequest request) throws YarnException, IOException {
+    ApplicationId applicationId = request.getApplicationId();
+    try {
+      GetApplicationReportResponse response =
+          GetApplicationReportResponse.newInstance(history
+            .getApplication(applicationId));
       return response;
+    } catch (IOException e) {
+      LOG.error(e.getMessage(), e);
+      throw e;
     }
+  }
 
-    @Override
-    public GetContainerReportResponse getContainerReport(
-        GetContainerReportRequest request) throws YarnException, IOException {
-      ContainerId containerId = request.getContainerId();
-      try {
-        GetContainerReportResponse response =
-            GetContainerReportResponse.newInstance(
-                history.getContainer(containerId));
-        return response;
-      } catch (IOException e) {
-        String msg = "Container with id '" + containerId +
-            "' doesn't exist in the history store.";
-        LOG.error(msg, e);
-        throw new ContainerNotFoundException(msg);
-      }
-    }
+  @Override
+  public GetApplicationsResponse
+      getApplications(GetApplicationsRequest request) throws YarnException,
+          IOException {
+    GetApplicationsResponse response =
+        GetApplicationsResponse.newInstance(new ArrayList<ApplicationReport>(
+          history.getAllApplications().values()));
+    return response;
+  }
 
-    @Override
-    public GetContainersResponse getContainers(GetContainersRequest request)
-        throws YarnException, IOException {
-      GetContainersResponse response =
-          GetContainersResponse.newInstance(new ArrayList<ContainerReport>(
-            history.getContainers(request.getApplicationAttemptId()).values()));
+  @Override
+  public GetContainerReportResponse getContainerReport(
+      GetContainerReportRequest request) throws YarnException, IOException {
+    ContainerId containerId = request.getContainerId();
+    try {
+      GetContainerReportResponse response =
+          GetContainerReportResponse.newInstance(history
+            .getContainer(containerId));
       return response;
+    } catch (IOException e) {
+      LOG.error(e.getMessage(), e);
+      throw e;
     }
+  }
 
-    @Override
-    public GetDelegationTokenResponse getDelegationToken(
-        GetDelegationTokenRequest request) throws YarnException, IOException {
-      // TODO Auto-generated method stub
-      return null;
-    }
+  @Override
+  public GetContainersResponse getContainers(GetContainersRequest request)
+      throws YarnException, IOException {
+    GetContainersResponse response =
+        GetContainersResponse.newInstance(new ArrayList<ContainerReport>(
+          history.getContainers(request.getApplicationAttemptId()).values()));
+    return response;
+  }
 
-    @Override
-    public RenewDelegationTokenResponse renewDelegationToken(
-        RenewDelegationTokenRequest request) throws YarnException, IOException {
-      // TODO Auto-generated method stub
-      return null;
-    }
+  @Override
+  public GetDelegationTokenResponse getDelegationToken(
+      GetDelegationTokenRequest request) throws YarnException, IOException {
+    return null;
+  }
+
+  @Override
+  public RenewDelegationTokenResponse renewDelegationToken(
+      RenewDelegationTokenRequest request) throws YarnException, IOException {
+    return null;
   }
 }

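With the inner handler folded into the service, callers invoke the protocol
methods on ApplicationHistoryClientService directly, as the updated tests
below also show. A minimal sketch, assuming a clientService instance and a
hypothetical application id:

    import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.api.records.ApplicationReport;

    ApplicationId appId = ApplicationId.newInstance(0, 1);
    GetApplicationReportRequest request =
        GetApplicationReportRequest.newInstance(appId);
    // Previously: clientService.getClientHandler().getApplicationReport(request)
    ApplicationReport report =
        clientService.getApplicationReport(request).getApplicationReport();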
http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManager.java
index db25d29..041c31b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManager.java
@@ -18,11 +18,125 @@
 
 package org.apache.hadoop.yarn.server.applicationhistoryservice;
 
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.yarn.server.api.ApplicationContext;
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerReport;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+
+@Private
+@Unstable
+public interface ApplicationHistoryManager {
+  /**
+   * This method returns the {@link ApplicationReport} for the specified
+   * {@link ApplicationId}.
+   * 
+   * @param appId
+   * 
+   * @return {@link ApplicationReport} for the ApplicationId.
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Public
+  @Unstable
+  ApplicationReport getApplication(ApplicationId appId) throws YarnException,
+      IOException;
+
+  /**
+   * This method returns the {@link ApplicationReport}s of all applications.
+   * 
+   * @return map of {@link ApplicationId} to {@link ApplicationReport}s.
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Public
+  @Unstable
+  Map<ApplicationId, ApplicationReport> getAllApplications()
+      throws YarnException, IOException;
+
+  /**
+   * An application can have multiple application attempts, each described
+   * by an {@link ApplicationAttemptReport}. This method returns all
+   * {@link ApplicationAttemptReport}s for the application.
+   * 
+   * @param appId
+   * 
+   * @return all {@link ApplicationAttemptReport}s for the Application.
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Public
+  @Unstable
+  Map<ApplicationAttemptId, ApplicationAttemptReport> getApplicationAttempts(
+      ApplicationId appId) throws YarnException, IOException;
+
+  /**
+   * This method returns the {@link ApplicationAttemptReport} for the
+   * specified {@link ApplicationAttemptId}.
+   * 
+   * @param appAttemptId
+   *          {@link ApplicationAttemptId}
+   * @return {@link ApplicationAttemptReport} for ApplicationAttemptId
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Public
+  @Unstable
+  ApplicationAttemptReport getApplicationAttempt(
+      ApplicationAttemptId appAttemptId) throws YarnException, IOException;
+
+  /**
+   * This method returns the {@link ContainerReport} for the specified
+   * {@link ContainerId}.
+   * 
+   * @param containerId
+   *          {@link ContainerId}
+   * @return {@link ContainerReport} for ContainerId
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Public
+  @Unstable
+  ContainerReport getContainer(ContainerId containerId) throws YarnException,
+      IOException;
+
+  /**
+   * This method returns the {@link ContainerReport} of the AM container
+   * for the specified {@link ApplicationAttemptId}.
+   * 
+   * @param appAttemptId
+   *          {@link ApplicationAttemptId}
+   * @return the AM container's {@link ContainerReport}
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Public
+  @Unstable
+  ContainerReport getAMContainer(ApplicationAttemptId appAttemptId)
+      throws YarnException, IOException;
+
+  /**
+   * This method returns a map of {@link ContainerId} to {@link ContainerReport}
+   * for the specified {@link ApplicationAttemptId}.
+   * 
+   * @param appAttemptId
+   *          {@link ApplicationAttemptId}
+   * @return Map of {@link ContainerId} to {@link ContainerReport} for
+   *         ApplicationAttemptId
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Public
+  @Unstable
+  Map<ContainerId, ContainerReport> getContainers(
+      ApplicationAttemptId appAttemptId) throws YarnException, IOException;
 
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-public interface ApplicationHistoryManager extends ApplicationContext {
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
index 0bafd36..88cd153 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
@@ -272,7 +272,7 @@ public class ApplicationHistoryServer extends CompositeService {
             .$for("applicationhistory", ApplicationHistoryClientService.class,
                 ahsClientService, "ws")
             .with(conf).at(bindAddress).start(
-                new AHSWebApp(timelineDataManager, historyManager));
+                new AHSWebApp(timelineDataManager, ahsClientService));
     } catch (Exception e) {
       String msg = "AHSWebApp failed to start.";
       LOG.error(msg, e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java
index 4baa75d..152364e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java
@@ -25,9 +25,8 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION_ID;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
-import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
-
 import org.apache.hadoop.yarn.server.webapp.AppsBlock;
+import org.apache.hadoop.yarn.server.webapp.WebPageUtils;
 import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.view.TwoColumnLayout;
 
@@ -41,7 +40,7 @@ public class AHSView extends TwoColumnLayout {
   protected void preHead(Page.HTML<_> html) {
     commonPreHead(html);
     set(DATATABLES_ID, "apps");
-    set(initID(DATATABLES, "apps"), appsTableInit());
+    set(initID(DATATABLES, "apps"), WebPageUtils.appsTableInit());
     setTableStyles(html, "apps", ".queue {width:6em}", ".ui {width:8em}");
 
     // Set the correct title.
@@ -64,27 +63,4 @@ public class AHSView extends TwoColumnLayout {
   protected Class<? extends SubView> content() {
     return AppsBlock.class;
   }
-
-  private String appsTableInit() {
-    // id, user, name, queue, starttime, finishtime, state, status, progress, ui
-    return tableInit().append(", 'aaData': appsTableData")
-      .append(", bDeferRender: true").append(", bProcessing: true")
-
-      .append("\n, aoColumnDefs: ").append(getAppsTableColumnDefs())
-
-      // Sort by id upon page load
-      .append(", aaSorting: [[0, 'desc']]}").toString();
-  }
-
-  protected String getAppsTableColumnDefs() {
-    StringBuilder sb = new StringBuilder();
-    return sb.append("[\n").append("{'sType':'numeric', 'aTargets': [0]")
-      .append(", 'mRender': parseHadoopID }")
-
-      .append("\n, {'sType':'numeric', 'aTargets': [5, 6]")
-      .append(", 'mRender': renderHadoopDate }")
-
-      .append("\n, {'sType':'numeric', bSearchable:false, 'aTargets': [9]")
-      .append(", 'mRender': parseHadoopProgress }]").toString();
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebApp.java
index 814752b..4b579c6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebApp.java
@@ -19,8 +19,8 @@ package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
 
 import static org.apache.hadoop.yarn.util.StringHelper.pajoin;
 
-import org.apache.hadoop.yarn.server.api.ApplicationContext;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryManager;
+import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryClientService;
 import org.apache.hadoop.yarn.server.timeline.TimelineDataManager;
 import org.apache.hadoop.yarn.server.timeline.webapp.TimelineWebServices;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
@@ -30,17 +30,17 @@ import org.apache.hadoop.yarn.webapp.YarnWebParams;
 
 public class AHSWebApp extends WebApp implements YarnWebParams {
 
-  private ApplicationHistoryManager applicationHistoryManager;
+  private final ApplicationHistoryClientService historyClientService;
   private TimelineDataManager timelineDataManager;
 
   public AHSWebApp(TimelineDataManager timelineDataManager,
-      ApplicationHistoryManager applicationHistoryManager) {
+      ApplicationHistoryClientService historyClientService) {
     this.timelineDataManager = timelineDataManager;
-    this.applicationHistoryManager = applicationHistoryManager;
+    this.historyClientService = historyClientService;
   }
 
-  public ApplicationHistoryManager getApplicationHistoryManager() {
-    return applicationHistoryManager;
+  public ApplicationHistoryClientService getApplicationHistoryClientService() {
+    return historyClientService;
   }
 
   public TimelineDataManager getTimelineDataManager() {
@@ -53,7 +53,7 @@ public class AHSWebApp extends WebApp implements YarnWebParams {
     bind(AHSWebServices.class);
     bind(TimelineWebServices.class);
     bind(GenericExceptionHandler.class);
-    bind(ApplicationContext.class).toInstance(applicationHistoryManager);
+    bind(ApplicationBaseProtocol.class).toInstance(historyClientService);
     bind(TimelineDataManager.class).toInstance(timelineDataManager);
     route("/", AHSController.class);
     route(pajoin("/apps", APP_STATE), AHSController.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
index 2af4027..9edc9ab 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
@@ -33,7 +33,7 @@ import javax.ws.rs.core.MediaType;
 
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
-import org.apache.hadoop.yarn.server.api.ApplicationContext;
+import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
 import org.apache.hadoop.yarn.server.webapp.WebServices;
 import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptsInfo;
@@ -51,8 +51,8 @@ import com.google.inject.Singleton;
 public class AHSWebServices extends WebServices {
 
   @Inject
-  public AHSWebServices(ApplicationContext appContext) {
-    super(appContext);
+  public AHSWebServices(ApplicationBaseProtocol appBaseProt) {
+    super(appBaseProt);
   }
 
   @GET

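Taken together with the AHSWebApp change above, this is standard Guice
wiring: whatever instance is bound to ApplicationBaseProtocol is what the
@Inject constructor receives. A minimal sketch of the same pattern, with
illustrative module and service names that are not part of the patch:

    import com.google.inject.AbstractModule;
    import com.google.inject.Guice;
    import com.google.inject.Inject;
    import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;

    class WebModule extends AbstractModule {
      private final ApplicationBaseProtocol protocol;

      WebModule(ApplicationBaseProtocol protocol) {
        this.protocol = protocol;
      }

      @Override
      protected void configure() {
        // Mirrors bind(ApplicationBaseProtocol.class).toInstance(...) in AHSWebApp.
        bind(ApplicationBaseProtocol.class).toInstance(protocol);
      }
    }

    class WebServices {
      private final ApplicationBaseProtocol appBaseProt;

      @Inject
      WebServices(ApplicationBaseProtocol appBaseProt) {
        this.appBaseProt = appBaseProt;
      }
    }

    // Guice.createInjector(new WebModule(historyClientService))
    //     .getInstance(WebServices.class);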
http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java
index 63b44bd..1e0a342 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java
@@ -21,9 +21,8 @@ import static org.apache.hadoop.yarn.util.StringHelper.join;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
-import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
-
 import org.apache.hadoop.yarn.server.webapp.AppAttemptBlock;
+import org.apache.hadoop.yarn.server.webapp.WebPageUtils;
 import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.YarnWebParams;
 
@@ -41,7 +40,7 @@ public class AppAttemptPage extends AHSView {
             $(YarnWebParams.APPLICATION_ATTEMPT_ID)));
 
     set(DATATABLES_ID, "containers");
-    set(initID(DATATABLES, "containers"), containersTableInit());
+    set(initID(DATATABLES, "containers"), WebPageUtils.containersTableInit());
     setTableStyles(html, "containers", ".queue {width:6em}", ".ui {width:8em}");
   }
 
@@ -50,16 +49,6 @@ public class AppAttemptPage extends AHSView {
     return AppAttemptBlock.class;
   }
 
-  private String containersTableInit() {
-    return tableInit().append(", 'aaData': containersTableData")
-      .append(", bDeferRender: true").append(", bProcessing: true")
-
-      .append("\n, aoColumnDefs: ").append(getContainersTableColumnDefs())
-
-      // Sort by id upon page load
-      .append(", aaSorting: [[0, 'desc']]}").toString();
-  }
-
   protected String getContainersTableColumnDefs() {
     StringBuilder sb = new StringBuilder();
     return sb.append("[\n").append("{'sType':'numeric', 'aTargets': [0]")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java
index 96ca659..cf92c1d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java
@@ -22,9 +22,8 @@ import static org.apache.hadoop.yarn.util.StringHelper.join;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
-import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
-
 import org.apache.hadoop.yarn.server.webapp.AppBlock;
+import org.apache.hadoop.yarn.server.webapp.WebPageUtils;
 import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.YarnWebParams;
 
@@ -40,9 +39,13 @@ public class AppPage extends AHSView {
       appId.isEmpty() ? "Bad request: missing application ID" : join(
         "Application ", $(YarnWebParams.APPLICATION_ID)));
 
-    set(DATATABLES_ID, "attempts");
-    set(initID(DATATABLES, "attempts"), attemptsTableInit());
+    set(DATATABLES_ID, "attempts ResourceRequests");
+    set(initID(DATATABLES, "attempts"), WebPageUtils.attemptsTableInit());
     setTableStyles(html, "attempts", ".queue {width:6em}", ".ui {width:8em}");
+
+    setTableStyles(html, "ResourceRequests");
+
+    set(YarnWebParams.WEB_UI_TYPE, YarnWebParams.APP_HISTORY_WEB_UI);
   }
 
   @Override
@@ -50,16 +53,6 @@ public class AppPage extends AHSView {
     return AppBlock.class;
   }
 
-  private String attemptsTableInit() {
-    return tableInit().append(", 'aaData': attemptsTableData")
-      .append(", bDeferRender: true").append(", bProcessing: true")
-
-      .append("\n, aoColumnDefs: ").append(getAttemptsTableColumnDefs())
-
-      // Sort by id upon page load
-      .append(", aaSorting: [[0, 'desc']]}").toString();
-  }
-
   protected String getAttemptsTableColumnDefs() {
     StringBuilder sb = new StringBuilder();
     return sb.append("[\n").append("{'sType':'numeric', 'aTargets': [0]")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70703472/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java
index 32d011e..d03b26d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java
@@ -77,7 +77,7 @@ public class TestApplicationHistoryClientService {
     GetApplicationReportRequest request =
         GetApplicationReportRequest.newInstance(appId);
     GetApplicationReportResponse response =
-        clientService.getClientHandler().getApplicationReport(request);
+        clientService.getApplicationReport(request);
     ApplicationReport appReport = response.getApplicationReport();
     Assert.assertNotNull(appReport);
     Assert.assertEquals(123, appReport.getApplicationResourceUsageReport()
@@ -98,7 +98,7 @@ public class TestApplicationHistoryClientService {
     ApplicationId appId1 = ApplicationId.newInstance(0, 2);
     GetApplicationsRequest request = GetApplicationsRequest.newInstance();
     GetApplicationsResponse response =
-        clientService.getClientHandler().getApplications(request);
+        clientService.getApplications(request);
     List<ApplicationReport> appReport = response.getApplicationList();
     Assert.assertNotNull(appReport);
     Assert.assertEquals(appId, appReport.get(0).getApplicationId());
@@ -113,7 +113,7 @@ public class TestApplicationHistoryClientService {
     GetApplicationAttemptReportRequest request =
         GetApplicationAttemptReportRequest.newInstance(appAttemptId);
     GetApplicationAttemptReportResponse response =
-        clientService.getClientHandler().getApplicationAttemptReport(request);
+        clientService.getApplicationAttemptReport(request);
     ApplicationAttemptReport attemptReport =
         response.getApplicationAttemptReport();
     Assert.assertNotNull(attemptReport);
@@ -131,7 +131,7 @@ public class TestApplicationHistoryClientService {
     GetApplicationAttemptsRequest request =
         GetApplicationAttemptsRequest.newInstance(appId);
     GetApplicationAttemptsResponse response =
-        clientService.getClientHandler().getApplicationAttempts(request);
+        clientService.getApplicationAttempts(request);
     List<ApplicationAttemptReport> attemptReports =
         response.getApplicationAttemptList();
     Assert.assertNotNull(attemptReports);
@@ -150,7 +150,7 @@ public class TestApplicationHistoryClientService {
     GetContainerReportRequest request =
         GetContainerReportRequest.newInstance(containerId);
     GetContainerReportResponse response =
-        clientService.getClientHandler().getContainerReport(request);
+        clientService.getContainerReport(request);
     ContainerReport container = response.getContainerReport();
     Assert.assertNotNull(container);
     Assert.assertEquals(containerId, container.getContainerId());
@@ -169,7 +169,7 @@ public class TestApplicationHistoryClientService {
     GetContainersRequest request =
         GetContainersRequest.newInstance(appAttemptId);
     GetContainersResponse response =
-        clientService.getClientHandler().getContainers(request);
+        clientService.getContainers(request);
     List<ContainerReport> containers = response.getContainerList();
     Assert.assertNotNull(containers);
     Assert.assertEquals(containerId, containers.get(0).getContainerId());


[44/50] [abbrv] hadoop git commit: Adding 2.8 section in CHANGES.txt

Posted by ji...@apache.org.
Adding 2.8 section in CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1040f705
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1040f705
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1040f705

Branch: refs/heads/HDFS-7285
Commit: 1040f70590e978933d04d5164f53ac7355d1fdc3
Parents: f88e63e
Author: Vinod Kumar Vavilapalli <vi...@apache.org>
Authored: Sun Mar 8 20:24:33 2015 -0700
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:17:55 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt | 12 ++++++++++++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     | 12 ++++++++++++
 hadoop-mapreduce-project/CHANGES.txt            | 12 ++++++++++++
 hadoop-yarn-project/CHANGES.txt                 | 12 ++++++++++++
 4 files changed, 48 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1040f705/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0af0beb..6f2c8c3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -426,6 +426,18 @@ Trunk (Unreleased)
 
     HADOOP-8589. ViewFs tests fail when tests and home dirs are nested (sanjay Radia)
 
+Release 2.8.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1040f705/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3cd6372..e106b1a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -313,6 +313,18 @@ Trunk (Unreleased)
     HDFS-4681. TestBlocksWithNotEnoughRacks#testCorruptBlockRereplicatedAcrossRacks 
     fails using IBM java (Ayappan via aw)
 
+Release 2.8.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1040f705/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 049b17d..8f06ac8 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -245,6 +245,18 @@ Trunk (Unreleased)
 
     MAPREDUCE-6078. native-task: fix gtest build on macosx (Binglin Chang)
 
+Release 2.8.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1040f705/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f28e932..da8b02e 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -45,6 +45,18 @@ Trunk - Unreleased
     YARN-2428. LCE default banned user list should have yarn (Varun
     Saxena via aw)
 
+Release 2.8.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES


[42/50] [abbrv] hadoop git commit: HADOOP-11670. Regression: s3a auth setup broken. (Adam Budde via stevel)

Posted by ji...@apache.org.
HADOOP-11670. Regression: s3a auth setup broken. (Adam Budde via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eed1645f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eed1645f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eed1645f

Branch: refs/heads/HDFS-7285
Commit: eed1645fd3a513a2a82cf76b4063a4baf4e819f9
Parents: fd63337
Author: Steve Loughran <st...@apache.org>
Authored: Sun Mar 8 11:20:42 2015 -0700
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:27 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 ++
 .../org/apache/hadoop/fs/s3a/Constants.java     |  6 +++++-
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 20 ++++++++++++++------
 .../src/site/markdown/tools/hadoop-aws/index.md | 10 +++++-----
 4 files changed, 26 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eed1645f/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 14cd75a..16002d5 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1050,6 +1050,8 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11674. oneByteBuf in CryptoInputStream and CryptoOutputStream
     should be non static. (Sean Busbey via yliu)
 
+    HADOOP-11670. Regression: s3a auth setup broken. (Adam Budde via stevel)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eed1645f/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
index e7462dc..3486dfb 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
@@ -18,8 +18,12 @@
 
 package org.apache.hadoop.fs.s3a;
 
-
 public class Constants {
+  // s3 access key
+  public static final String ACCESS_KEY = "fs.s3a.access.key";
+
+  // s3 secret key
+  public static final String SECRET_KEY = "fs.s3a.secret.key";
 
   // number of simultaneous connections to s3
   public static final String MAXIMUM_CONNECTIONS = "fs.s3a.connection.maximum";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eed1645f/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 1a30d6f..91a606c 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -32,8 +32,6 @@ import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.hadoop.fs.s3.S3Credentials;
-
 import com.amazonaws.AmazonClientException;
 import com.amazonaws.AmazonServiceException;
 import com.amazonaws.ClientConfiguration;
@@ -159,12 +157,22 @@ public class S3AFileSystem extends FileSystem {
         this.getWorkingDirectory());
 
     // Try to get our credentials or just connect anonymously
-    S3Credentials s3Credentials = new S3Credentials();
-    s3Credentials.initialize(name, conf);
+    String accessKey = conf.get(ACCESS_KEY, null);
+    String secretKey = conf.get(SECRET_KEY, null);
+
+    String userInfo = name.getUserInfo();
+    if (userInfo != null) {
+      int index = userInfo.indexOf(':');
+      if (index != -1) {
+        accessKey = userInfo.substring(0, index);
+        secretKey = userInfo.substring(index + 1);
+      } else {
+        accessKey = userInfo;
+      }
+    }
 
     AWSCredentialsProviderChain credentials = new AWSCredentialsProviderChain(
-        new BasicAWSCredentialsProvider(s3Credentials.getAccessKey(),
-                                        s3Credentials.getSecretAccessKey()),
+        new BasicAWSCredentialsProvider(accessKey, secretKey),
         new InstanceProfileCredentialsProvider(),
         new AnonymousAWSCredentialsProvider()
     );

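A minimal sketch of client-side setup after this patch, using the two
renamed keys from Constants.java above; the bucket name and key values are
placeholders:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    Configuration conf = new Configuration();
    // The renamed properties; fs.s3a.awsAccessKeyId and
    // fs.s3a.awsSecretAccessKey are no longer read.
    conf.set("fs.s3a.access.key", "MYACCESSKEY");
    conf.set("fs.s3a.secret.key", "MYSECRETKEY");
    FileSystem fs = FileSystem.get(URI.create("s3a://mybucket/"), conf);

    // Per the userInfo parsing above, credentials embedded in the URI,
    // e.g. s3a://MYACCESSKEY:MYSECRETKEY@mybucket/, override the
    // configuration; with neither present, the provider chain falls back
    // to instance-profile and finally anonymous credentials.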
http://git-wip-us.apache.org/repos/asf/hadoop/blob/eed1645f/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index bf62634..e0389c0 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -141,12 +141,12 @@ If you do any of these: change your credentials immediately!
 ### Authentication properties
 
     <property>
-      <name>fs.s3a.awsAccessKeyId</name>
+      <name>fs.s3a.access.key</name>
       <description>AWS access key ID. Omit for Role-based authentication.</description>
     </property>
 
     <property>
-      <name>fs.s3a.awsSecretAccessKey</name>
+      <name>fs.s3a.secret.key</name>
       <description>AWS secret key. Omit for Role-based authentication.</description>
     </property>
 
@@ -411,13 +411,13 @@ Example:
       </property>
 
       <property>
-        <name>fs.s3a.awsAccessKeyId</name>
+        <name>fs.s3a.access.key</name>
         <description>AWS access key ID. Omit for Role-based authentication.</description>
-        <value>DONOTPCOMMITTHISKEYTOSCM</value>
+        <value>DONOTCOMMITTHISKEYTOSCM</value>
       </property>
   
       <property>
-        <name>fs.s3a.awsSecretAccessKey</name>
+        <name>fs.s3a.secret.key</name>
         <description>AWS secret key. Omit for Role-based authentication.</description>
         <value>DONOTEVERSHARETHISSECRETKEY!</value>
       </property>


[20/50] [abbrv] hadoop git commit: YARN-3242. Asynchrony in ZK-close can lead to ZKRMStateStore watcher receiving events for old client. (Zhihai Xu via kasha)

Posted by ji...@apache.org.
YARN-3242. Asynchrony in ZK-close can lead to ZKRMStateStore watcher receiving events for old client. (Zhihai Xu via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31d3efe6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31d3efe6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31d3efe6

Branch: refs/heads/HDFS-7285
Commit: 31d3efe6fa0a643d0935ccb7780482dc3f4789b2
Parents: 27f8981
Author: Karthik Kambatla <ka...@apache.org>
Authored: Wed Mar 4 19:47:02 2015 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:24 2015 -0700

----------------------------------------------------------------------
 .../apache/hadoop/ha/ClientBaseWithFixes.java   | 11 +++-
 hadoop-yarn-project/CHANGES.txt                 |  3 ++
 .../recovery/ZKRMStateStore.java                | 53 ++++++++++++--------
 .../TestZKRMStateStoreZKClientConnections.java  | 33 +++++++++---
 4 files changed, 70 insertions(+), 30 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31d3efe6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
index 7d0727a..5f03133 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
@@ -90,6 +90,14 @@ public abstract class ClientBaseWithFixes extends ZKTestCase {
         // XXX this doesn't need to be volatile! (Should probably be final)
         volatile CountDownLatch clientConnected;
         volatile boolean connected;
+        protected ZooKeeper client;
+
+        public void initializeWatchedClient(ZooKeeper zk) {
+            if (client != null) {
+                throw new RuntimeException("Watched Client was already set");
+            }
+            client = zk;
+        }
 
         public CountdownWatcher() {
             reset();
@@ -191,8 +199,7 @@ public abstract class ClientBaseWithFixes extends ZKTestCase {
                 zk.close();
             }
         }
-
-
+        watcher.initializeWatchedClient(zk);
         return zk;
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31d3efe6/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 9a52325..4dd61eb 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -701,6 +701,9 @@ Release 2.7.0 - UNRELEASED
     YARN-3231. FairScheduler: Changing queueMaxRunningApps interferes with pending 
     jobs. (Siqi Li via kasha)
 
+    YARN-3242. Asynchrony in ZK-close can lead to ZKRMStateStore watcher receiving 
+    events for old client. (Zhihai Xu via kasha)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31d3efe6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
index 591a551..614ef15 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
@@ -153,7 +153,13 @@ public class ZKRMStateStore extends RMStateStore {
 
   @VisibleForTesting
   protected ZooKeeper zkClient;
-  private ZooKeeper oldZkClient;
+
+  /* activeZkClient is not used to do actual operations,
+   * it is only used to verify client session for watched events and
+   * it gets activated into zkClient on connection event.
+   */
+  @VisibleForTesting
+  ZooKeeper activeZkClient;
 
   /** Fencing related variables */
   private static final String FENCING_LOCK = "RM_ZK_FENCING_LOCK";
@@ -355,21 +361,14 @@ public class ZKRMStateStore extends RMStateStore {
   }
 
   private synchronized void closeZkClients() throws IOException {
-    if (zkClient != null) {
+    zkClient = null;
+    if (activeZkClient != null) {
       try {
-        zkClient.close();
+        activeZkClient.close();
       } catch (InterruptedException e) {
         throw new IOException("Interrupted while closing ZK", e);
       }
-      zkClient = null;
-    }
-    if (oldZkClient != null) {
-      try {
-        oldZkClient.close();
-      } catch (InterruptedException e) {
-        throw new IOException("Interrupted while closing old ZK", e);
-      }
-      oldZkClient = null;
+      activeZkClient = null;
     }
   }
 
@@ -830,11 +829,16 @@ public class ZKRMStateStore extends RMStateStore {
    * hides the ZK methods of the store from its public interface
    */
   private final class ForwardingWatcher implements Watcher {
+    private ZooKeeper watchedZkClient;
+
+    public ForwardingWatcher(ZooKeeper client) {
+      this.watchedZkClient = client;
+    }
 
     @Override
     public void process(WatchedEvent event) {
       try {
-        ZKRMStateStore.this.processWatchEvent(event);
+        ZKRMStateStore.this.processWatchEvent(watchedZkClient, event);
       } catch (Throwable t) {
         LOG.error("Failed to process watcher event " + event + ": "
             + StringUtils.stringifyException(t));
@@ -845,8 +849,16 @@ public class ZKRMStateStore extends RMStateStore {
   @VisibleForTesting
   @Private
   @Unstable
-  public synchronized void processWatchEvent(WatchedEvent event)
-      throws Exception {
+  public synchronized void processWatchEvent(ZooKeeper zk,
+      WatchedEvent event) throws Exception {
+    // only process watcher event from current ZooKeeper Client session.
+    if (zk != activeZkClient) {
+      LOG.info("Ignore watcher event type: " + event.getType() +
+          " with state:" + event.getState() + " for path:" +
+          event.getPath() + " from old session");
+      return;
+    }
+
     Event.EventType eventType = event.getType();
     LOG.info("Watcher event type: " + eventType + " with state:"
         + event.getState() + " for path:" + event.getPath() + " for " + this);
@@ -857,17 +869,15 @@ public class ZKRMStateStore extends RMStateStore {
       switch (event.getState()) {
         case SyncConnected:
           LOG.info("ZKRMStateStore Session connected");
-          if (oldZkClient != null) {
+          if (zkClient == null) {
             // the SyncConnected must be from the client that sent Disconnected
-            zkClient = oldZkClient;
-            oldZkClient = null;
+            zkClient = activeZkClient;
             ZKRMStateStore.this.notifyAll();
             LOG.info("ZKRMStateStore Session restored");
           }
           break;
         case Disconnected:
           LOG.info("ZKRMStateStore Session disconnected");
-          oldZkClient = zkClient;
           zkClient = null;
           break;
         case Expired:
@@ -1100,7 +1110,8 @@ public class ZKRMStateStore extends RMStateStore {
     for (int retries = 0; retries < numRetries && zkClient == null;
         retries++) {
       try {
-        zkClient = getNewZooKeeper();
+        activeZkClient = getNewZooKeeper();
+        zkClient = activeZkClient;
         for (ZKUtil.ZKAuthInfo zkAuth : zkAuths) {
           zkClient.addAuthInfo(zkAuth.getScheme(), zkAuth.getAuth());
         }
@@ -1130,7 +1141,7 @@ public class ZKRMStateStore extends RMStateStore {
   protected synchronized ZooKeeper getNewZooKeeper()
       throws IOException, InterruptedException {
     ZooKeeper zk = new ZooKeeper(zkHostPort, zkSessionTimeout, null);
-    zk.register(new ForwardingWatcher());
+    zk.register(new ForwardingWatcher(zk));
     return zk;
   }
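
The core of the fix is visible above: each ForwardingWatcher is pinned to the ZooKeeper instance it was registered on, and events from any client other than activeZkClient are dropped. A minimal, self-contained sketch of that guard, assuming only the ZooKeeper client library; SessionGuard is an illustrative name:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    // Sketch: reject watcher callbacks that originate from a superseded session.
    final class SessionGuard {
      private volatile ZooKeeper activeClient;     // the only client we trust

      Watcher watcherFor(final ZooKeeper client) { // pin the watcher to its client
        return new Watcher() {
          @Override
          public void process(WatchedEvent event) {
            if (client != activeClient) {
              return;                              // stale session: ignore event
            }
            // ... handle SyncConnected / Disconnected / Expired here ...
          }
        };
      }

      void activate(ZooKeeper client) {            // call after (re)connecting
        this.activeClient = client;
      }
    }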
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31d3efe6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStoreZKClientConnections.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStoreZKClientConnections.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStoreZKClientConnections.java
index 8dc3628..62dc5ef 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStoreZKClientConnections.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStoreZKClientConnections.java
@@ -71,6 +71,7 @@ public class TestZKRMStateStoreZKClientConnections extends
 
     ZKRMStateStore store;
     boolean forExpire = false;
+    TestForwardingWatcher oldWatcher;
     TestForwardingWatcher watcher;
     CyclicBarrier syncBarrier = new CyclicBarrier(2);
 
@@ -86,35 +87,36 @@ public class TestZKRMStateStoreZKClientConnections extends
       @Override
       public ZooKeeper getNewZooKeeper()
           throws IOException, InterruptedException {
+        oldWatcher = watcher;
+        watcher = new TestForwardingWatcher();
         return createClient(watcher, hostPort, ZK_TIMEOUT_MS);
       }
 
       @Override
-      public synchronized void processWatchEvent(WatchedEvent event)
-          throws Exception {
+      public synchronized void processWatchEvent(ZooKeeper zk,
+          WatchedEvent event) throws Exception {
 
         if (forExpire) {
           // a hack... couldn't find a way to trigger expired event.
           WatchedEvent expriredEvent = new WatchedEvent(
               Watcher.Event.EventType.None,
               Watcher.Event.KeeperState.Expired, null);
-          super.processWatchEvent(expriredEvent);
+          super.processWatchEvent(zk, expriredEvent);
           forExpire = false;
           syncBarrier.await();
         } else {
-          super.processWatchEvent(event);
+          super.processWatchEvent(zk, event);
         }
       }
     }
 
     private class TestForwardingWatcher extends
         ClientBaseWithFixes.CountdownWatcher {
-
       public void process(WatchedEvent event) {
         super.process(event);
         try {
           if (store != null) {
-            store.processWatchEvent(event);
+            store.processWatchEvent(client, event);
           }
         } catch (Throwable t) {
           LOG.error("Failed to process watcher event " + event + ": "
@@ -127,7 +129,6 @@ public class TestZKRMStateStoreZKClientConnections extends
       String workingZnode = "/Test";
       conf.set(YarnConfiguration.RM_ZK_ADDRESS, hostPort);
       conf.set(YarnConfiguration.ZK_RM_STATE_STORE_PARENT_PATH, workingZnode);
-      watcher = new TestForwardingWatcher();
       this.store = new TestZKRMStateStore(conf, workingZnode);
       return this.store;
     }
@@ -239,6 +240,24 @@ public class TestZKRMStateStoreZKClientConnections extends
       LOG.error(error, e);
       fail(error);
     }
+
+    // send Disconnected event from old client session to ZKRMStateStore
+    // check the current client session is not affected.
+    Assert.assertTrue(zkClientTester.oldWatcher != null);
+    WatchedEvent disconnectedEvent = new WatchedEvent(
+        Watcher.Event.EventType.None,
+        Watcher.Event.KeeperState.Disconnected, null);
+    zkClientTester.oldWatcher.process(disconnectedEvent);
+    Assert.assertTrue(store.zkClient != null);
+
+    zkClientTester.watcher.process(disconnectedEvent);
+    Assert.assertTrue(store.zkClient == null);
+    WatchedEvent connectedEvent = new WatchedEvent(
+        Watcher.Event.EventType.None,
+        Watcher.Event.KeeperState.SyncConnected, null);
+    zkClientTester.watcher.process(connectedEvent);
+    Assert.assertTrue(store.zkClient != null);
+    Assert.assertTrue(store.zkClient == store.activeZkClient);
   }
 
   @Test(timeout = 20000)
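
The test changes above rely on a useful trick: session-level state transitions are driven by handing the watcher a synthetic WatchedEvent rather than waiting for a real network event. A sketch of how such an event is built, assuming the ZooKeeper client library; fakeDisconnect is an illustrative name:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;

    final class SyntheticEventSketch {
      // A session event carries EventType.None and a null path; the KeeperState
      // (Disconnected, SyncConnected, Expired) selects the transition under test.
      static WatchedEvent fakeDisconnect() {
        return new WatchedEvent(
            Watcher.Event.EventType.None,
            Watcher.Event.KeeperState.Disconnected, null);
      }
    }

Feeding such an event through watcher.process(...) exercises the same code path a real disconnect would, which is what the assertions at the end of the test verify.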


[49/50] [abbrv] hadoop git commit: HDFS-7898. Change TestAppendSnapshotTruncate to fail-fast. Contributed by Tsz Wo Nicholas Sze.

Posted by ji...@apache.org.
HDFS-7898. Change TestAppendSnapshotTruncate to fail-fast. Contributed by Tsz Wo Nicholas Sze.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/35a08d8c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/35a08d8c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/35a08d8c

Branch: refs/heads/HDFS-7285
Commit: 35a08d8c96af3d6a94b302ecacaddfcd428c6b94
Parents: 38b921a
Author: Jing Zhao <ji...@apache.org>
Authored: Mon Mar 9 10:52:17 2015 -0700
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:17:56 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +
 .../hadoop/hdfs/TestAppendSnapshotTruncate.java | 61 +++++++++++++-------
 .../hdfs/server/namenode/TestFileTruncate.java  | 11 +++-
 3 files changed, 51 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/35a08d8c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e106b1a..094abfe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -734,6 +734,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7411. Change decommission logic to throttle by blocks rather than
     nodes in each interval. (Andrew Wang via cdouglas)
 
+    HDFS-7898. Change TestAppendSnapshotTruncate to fail-fast.
+    (Tsz Wo Nicholas Sze via jing9)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/35a08d8c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
index 5c4c7b4..e80e14f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
@@ -41,10 +41,6 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.TestFileTruncate;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -69,6 +65,9 @@ public class TestAppendSnapshotTruncate {
   private static final int BLOCK_SIZE = 1024;
   private static final int DATANODE_NUM = 3;
   private static final short REPLICATION = 3;
+  private static final int FILE_WORKER_NUM = 3;
+  private static final long TEST_TIME_SECOND = 10;
+  private static final long TEST_TIMEOUT_SECOND = TEST_TIME_SECOND + 60;
 
   static final int SHORT_HEARTBEAT = 1;
   static final String[] EMPTY_STRINGS = {};
@@ -106,7 +105,7 @@ public class TestAppendSnapshotTruncate {
 
 
   /** Test randomly mixing append, snapshot and truncate operations. */
-  @Test
+  @Test(timeout=TEST_TIMEOUT_SECOND*1000)
   public void testAST() throws Exception {
     final String dirPathString = "/dir";
     final Path dir = new Path(dirPathString);
@@ -121,12 +120,12 @@ public class TestAppendSnapshotTruncate {
     }
     localDir.mkdirs();
 
-    final DirWorker w = new DirWorker(dir, localDir, 3);
+    final DirWorker w = new DirWorker(dir, localDir, FILE_WORKER_NUM);
     w.startAllFiles();
     w.start();
-    Worker.sleep(10L*1000);
+    Worker.sleep(TEST_TIME_SECOND * 1000);
     w.stop();
-    w.stoptAllFiles();
+    w.stopAllFiles();
     w.checkEverything();
   }
 
@@ -259,7 +258,7 @@ public class TestAppendSnapshotTruncate {
       }
     }
     
-    void stoptAllFiles() throws InterruptedException {
+    void stopAllFiles() throws InterruptedException {
       for(FileWorker f : files) { 
         f.stop();
       }
@@ -269,12 +268,12 @@ public class TestAppendSnapshotTruncate {
       LOG.info("checkEverything");
       for(FileWorker f : files) { 
         f.checkFullFile();
-        Preconditions.checkState(f.state.get() != State.ERROR);
+        f.checkErrorState();
       }
       for(String snapshot : snapshotPaths.keySet()) {
         checkSnapshot(snapshot);
       }
-      Preconditions.checkState(state.get() != State.ERROR);
+      checkErrorState();
     }
   }
 
@@ -364,7 +363,7 @@ public class TestAppendSnapshotTruncate {
       b.append(", newLength=").append(newLength)
        .append(", isReady=").append(isReady);
       if (!isReady) {
-        TestFileTruncate.checkBlockRecovery(file, dfs);
+        TestFileTruncate.checkBlockRecovery(file, dfs, 100, 300L);
       }
       return isReady;
     }
@@ -407,6 +406,7 @@ public class TestAppendSnapshotTruncate {
       IDLE(false), RUNNING(false), STOPPED(true), ERROR(true);
       
       final boolean isTerminated;
+
       State(boolean isTerminated) {
         this.isTerminated = isTerminated;
       }
@@ -416,11 +416,29 @@ public class TestAppendSnapshotTruncate {
     final AtomicReference<State> state = new AtomicReference<State>(State.IDLE);
     final AtomicBoolean isCalling = new AtomicBoolean();
     final AtomicReference<Thread> thread = new AtomicReference<Thread>();
-    
+
+    private Throwable thrown = null;
+
     Worker(String name) {
       this.name = name;
     }
 
+    State checkErrorState() {
+      final State s = state.get();
+      if (s == State.ERROR) {
+        throw new IllegalStateException(name + " has " + s, thrown);
+      }
+      return s;
+    }
+
+    void setErrorState(Throwable t) {
+      checkErrorState();
+
+      LOG.error("Worker " + name + " failed.", t);
+      state.set(State.ERROR);
+      thrown = t;
+    }
+
     void start() {
       Preconditions.checkState(state.compareAndSet(State.IDLE, State.RUNNING));
       
@@ -429,14 +447,13 @@ public class TestAppendSnapshotTruncate {
           @Override
           public void run() {
             final Random r = DFSUtil.getRandom();
-            for(State s; (s = state.get()) == State.RUNNING || s == State.IDLE;) {
+            for(State s; !(s = checkErrorState()).isTerminated;) {
               if (s == State.RUNNING) {
                 isCalling.set(true);
                 try {
                   LOG.info(call());
-                } catch (Exception e) {
-                  LOG.error("Worker " + name + " failed.", e);
-                  state.set(State.ERROR);
+                } catch(Throwable t) {
+                  setErrorState(t);
                   return;
                 }
                 isCalling.set(false);
@@ -451,7 +468,11 @@ public class TestAppendSnapshotTruncate {
     }
 
     boolean isPaused() {
-      return state.get() == State.IDLE && !isCalling.get();
+      final State s = checkErrorState();
+      if (s == State.STOPPED) {
+        throw new IllegalStateException(name + " is " + s);
+      }
+      return s == State.IDLE && !isCalling.get();
     }
 
     void pause() {
@@ -459,9 +480,7 @@ public class TestAppendSnapshotTruncate {
     }
 
     void stop() throws InterruptedException {
-      if (state.get() == State.ERROR) {
-        return;
-      }
+      checkErrorState();
 
       state.set(State.STOPPED);
       thread.get().join();
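
The Worker changes above implement a small fail-fast pattern: the first Throwable is recorded alongside the ERROR state, and every later interaction with the worker rethrows it as the cause of an IllegalStateException. A minimal sketch of the pattern in isolation, using only java.util.concurrent; FailFastWorker is an illustrative name:

    import java.util.concurrent.atomic.AtomicReference;

    // Sketch: remember the first failure and surface it on every later interaction.
    final class FailFastWorker {
      private final AtomicReference<Throwable> thrown = new AtomicReference<>();

      void run(Runnable task) {
        try {
          task.run();
        } catch (Throwable t) {
          thrown.compareAndSet(null, t);           // keep only the first failure
        }
      }

      void checkErrorState() {                     // call from stop()/isPaused()/checks
        Throwable t = thrown.get();
        if (t != null) {
          throw new IllegalStateException("worker has failed", t);
        }
      }
    }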

http://git-wip-us.apache.org/repos/asf/hadoop/blob/35a08d8c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index cb806aa..c7b9221 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -1153,8 +1153,13 @@ public class TestFileTruncate {
 
   public static void checkBlockRecovery(Path p, DistributedFileSystem dfs)
       throws IOException {
+    checkBlockRecovery(p, dfs, SUCCESS_ATTEMPTS, SLEEP);
+  }
+
+  public static void checkBlockRecovery(Path p, DistributedFileSystem dfs,
+      int attempts, long sleepMs) throws IOException {
     boolean success = false;
-    for(int i = 0; i < SUCCESS_ATTEMPTS; i++) {
+    for(int i = 0; i < attempts; i++) {
       LocatedBlocks blocks = getLocatedBlocks(p, dfs);
       boolean noLastBlock = blocks.getLastLocatedBlock() == null;
       if(!blocks.isUnderConstruction() &&
@@ -1162,9 +1167,9 @@ public class TestFileTruncate {
         success = true;
         break;
       }
-      try { Thread.sleep(SLEEP); } catch (InterruptedException ignored) {}
+      try { Thread.sleep(sleepMs); } catch (InterruptedException ignored) {}
     }
-    assertThat("inode should complete in ~" + SLEEP * SUCCESS_ATTEMPTS + " ms.",
+    assertThat("inode should complete in ~" + sleepMs * attempts + " ms.",
         success, is(true));
   }
 


[26/50] [abbrv] hadoop git commit: HADOOP-11648. Set DomainSocketWatcher thread name explicitly. Contributed by Liang Xie.

Posted by ji...@apache.org.
HADOOP-11648. Set DomainSocketWatcher thread name explicitly. Contributed by Liang Xie.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f5632a4f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f5632a4f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f5632a4f

Branch: refs/heads/HDFS-7285
Commit: f5632a4f65e9780eba83c25aa5570f78034f2e41
Parents: b9f374b
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Thu Mar 5 16:05:44 2015 +0900
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:25 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt              | 3 +++
 .../java/org/apache/hadoop/net/unix/DomainSocketWatcher.java | 8 +++++---
 .../org/apache/hadoop/net/unix/TestDomainSocketWatcher.java  | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                  | 3 +++
 .../hadoop/hdfs/server/datanode/ShortCircuitRegistry.java    | 2 +-
 .../apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java | 3 ++-
 6 files changed, 15 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5632a4f/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index d518d9f..92af646 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -647,6 +647,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11658. Externalize io.compression.codecs property.
     (Kai Zheng via aajisaka)
 
+    HADOOP-11648. Set DomainSocketWatcher thread name explicitly.
+    (Liang Xie via ozawa)
+
   OPTIMIZATIONS
 
     HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5632a4f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
index 8c617dc..03b52e0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
@@ -238,7 +238,8 @@ public final class DomainSocketWatcher implements Closeable {
    */
   private boolean kicked = false;
 
-  public DomainSocketWatcher(int interruptCheckPeriodMs) throws IOException {
+  public DomainSocketWatcher(int interruptCheckPeriodMs, String src)
+      throws IOException {
     if (loadingFailureReason != null) {
       throw new UnsupportedOperationException(loadingFailureReason);
     }
@@ -246,8 +247,9 @@ public final class DomainSocketWatcher implements Closeable {
     this.interruptCheckPeriodMs = interruptCheckPeriodMs;
     notificationSockets = DomainSocket.socketpair();
     watcherThread.setDaemon(true);
-    watcherThread.setUncaughtExceptionHandler(
-        new Thread.UncaughtExceptionHandler() {
+    watcherThread.setName(src + " DomainSocketWatcher");
+    watcherThread
+        .setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
           @Override
           public void uncaughtException(Thread thread, Throwable t) {
             LOG.error(thread + " terminating on unexpected exception", t);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5632a4f/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java
index e85e414..4b0e2a8 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java
@@ -195,7 +195,7 @@ public class TestDomainSocketWatcher {
   private DomainSocketWatcher newDomainSocketWatcher(int interruptCheckPeriodMs)
       throws Exception {
     DomainSocketWatcher watcher = new DomainSocketWatcher(
-        interruptCheckPeriodMs);
+        interruptCheckPeriodMs, getClass().getSimpleName());
     watcher.watcherThread.setUncaughtExceptionHandler(
         new Thread.UncaughtExceptionHandler() {
           @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5632a4f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f9541e0..59f69fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -712,6 +712,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7746. Add a test randomly mixing append, truncate and snapshot
     operations. (szetszwo)
 
+    HADOOP-11648. Set DomainSocketWatcher thread name explicitly.
+    (Liang Xie via ozawa)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5632a4f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
index 965b40a..32906f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
@@ -176,7 +176,7 @@ public class ShortCircuitRegistry {
       if (dswLoadingFailure != null) {
         throw new IOException(dswLoadingFailure);
       }
-      watcher = new DomainSocketWatcher(interruptCheck);
+      watcher = new DomainSocketWatcher(interruptCheck, "datanode");
       enabled = true;
       if (LOG.isDebugEnabled()) {
         LOG.debug("created new ShortCircuitRegistry with interruptCheck=" +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5632a4f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
index 6dbaf84..9092bc5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
@@ -412,7 +412,8 @@ public class DfsClientShmManager implements Closeable {
   private final DomainSocketWatcher domainSocketWatcher;
   
   DfsClientShmManager(int interruptCheckPeriodMs) throws IOException {
-    this.domainSocketWatcher = new DomainSocketWatcher(interruptCheckPeriodMs);
+    this.domainSocketWatcher = new DomainSocketWatcher(interruptCheckPeriodMs,
+        "client");
   }
   
   public Slot allocSlot(DatanodeInfo datanode, DomainPeer peer,
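
The change above is a small diagnosability pattern: give every long-lived daemon thread a name identifying its owner ("datanode" or "client" here) and attach an uncaught-exception handler, so thread dumps and error logs point at the right component. A minimal sketch using only java.lang; ThreadNamingSketch is an illustrative holder:

    final class ThreadNamingSketch {
      // Sketch: a named daemon thread whose failures are logged with its owner's tag.
      static Thread namedDaemon(String src, Runnable body) {
        Thread t = new Thread(body);
        t.setDaemon(true);
        t.setName(src + " DomainSocketWatcher");   // e.g. "datanode DomainSocketWatcher"
        t.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
          @Override
          public void uncaughtException(Thread thread, Throwable err) {
            System.err.println(thread + " terminating on unexpected exception: " + err);
          }
        });
        return t;
      }
    }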


[38/50] [abbrv] hadoop git commit: YARN-3227. Timeline renew delegation token fails when RM user's TGT is expired. Contributed by Zhijie Shen

Posted by ji...@apache.org.
YARN-3227. Timeline renew delegation token fails when RM user's TGT is expired. Contributed by Zhijie Shen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c6199e76
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c6199e76
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c6199e76

Branch: refs/heads/HDFS-7285
Commit: c6199e76ea86d07f8d96381d5a1ba45999bfdb4e
Parents: 055267d
Author: Xuan <xg...@apache.org>
Authored: Fri Mar 6 13:32:05 2015 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:26 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                                   | 3 +++
 .../apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java    | 2 ++
 2 files changed, 5 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6199e76/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index accde78..d073169 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -713,6 +713,9 @@ Release 2.7.0 - UNRELEASED
     YARN-3242. Asynchrony in ZK-close can lead to ZKRMStateStore watcher receiving 
     events for old client. (Zhihai Xu via kasha)
 
+    YARN-3227. Timeline renew delegation token fails when RM user's TGT is expired
+    (Zhijie Shen via xgong)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6199e76/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
index af68492..c05d65b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
@@ -439,6 +439,7 @@ public class TimelineClientImpl extends TimelineClient {
         UserGroupInformation callerUGI = isProxyAccess ?
             UserGroupInformation.getCurrentUser().getRealUser()
             : UserGroupInformation.getCurrentUser();
+        callerUGI.checkTGTAndReloginFromKeytab();
         try {
           return callerUGI.doAs(action);
         } catch (UndeclaredThrowableException e) {
@@ -488,6 +489,7 @@ public class TimelineClientImpl extends TimelineClient {
           : UserGroupInformation.getCurrentUser();
       final String doAsUser = isProxyAccess ?
           UserGroupInformation.getCurrentUser().getShortUserName() : null;
+      callerUGI.checkTGTAndReloginFromKeytab();
       try {
         return callerUGI.doAs(new PrivilegedExceptionAction<HttpURLConnection>() {
           @Override
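
The two one-line additions above follow a general Kerberos pattern: before running a privileged action under a keytab-backed login, re-check the TGT so a near-expired ticket is refreshed instead of failing the call. A minimal sketch, assuming Hadoop's UserGroupInformation API; ReloginSketch and runPrivileged are illustrative names:

    import java.io.IOException;
    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.security.UserGroupInformation;

    final class ReloginSketch {
      // checkTGTAndReloginFromKeytab is cheap when the ticket is still valid,
      // and refreshes it from the keytab when it is near expiry.
      static <T> T runPrivileged(UserGroupInformation ugi,
          PrivilegedExceptionAction<T> action)
          throws IOException, InterruptedException {
        ugi.checkTGTAndReloginFromKeytab();
        return ugi.doAs(action);
      }
    }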


[15/50] [abbrv] hadoop git commit: HDFS-7682. {{DistributedFileSystem#getFileChecksum}} of a snapshotted file includes non-snapshotted content. Contributed by Charles Lamb.

Posted by ji...@apache.org.
HDFS-7682. {{DistributedFileSystem#getFileChecksum}} of a snapshotted file includes non-snapshotted content. Contributed by Charles Lamb.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0ac995e5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0ac995e5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0ac995e5

Branch: refs/heads/HDFS-7285
Commit: 0ac995e50c9c6073a51fbcacdb05da53a844f59b
Parents: 046dc67
Author: Aaron T. Myers <at...@apache.org>
Authored: Tue Mar 3 18:08:59 2015 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:23 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +++
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  3 +++
 .../snapshot/TestSnapshotFileLength.java        | 25 +++++++++++++++++---
 3 files changed, 28 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ac995e5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4e7b919..7ff3c78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1080,6 +1080,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-6565. Use jackson instead jetty json in hdfs-client.
     (Akira Ajisaka via wheat9)
 
+    HDFS-7682. {{DistributedFileSystem#getFileChecksum}} of a snapshotted file
+    includes non-snapshotted content. (Charles Lamb via atm)
+
     BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
       HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ac995e5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index abcd847..aac7b51 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2220,6 +2220,9 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 
     // get block checksum for each block
     long remaining = length;
+    if (src.contains(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR_SEPARATOR)) {
+      remaining = Math.min(length, blockLocations.getFileLength());
+    }
     for(int i = 0; i < locatedblocks.size() && remaining > 0; i++) {
       if (refetchBlocks) {  // refetch to get fresh tokens
         blockLocations = callGetBlockLocations(namenode, src, 0, length);
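
The three added lines above clamp the checksum scan. Restated as a self-contained sketch, where snapshotLength stands in for blockLocations.getFileLength() (the size frozen when the snapshot was taken) and the separator constant is spelled out; the names are illustrative:

    final class SnapshotClampSketch {
      // 'length' is the caller-requested span to checksum.
      static long boundedRemaining(String src, long length, long snapshotLength) {
        return src.contains("/.snapshot/")         // snapshotted path
            ? Math.min(length, snapshotLength)     // never read past the snapshot
            : length;
      }
    }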

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ac995e5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
index 98aafc1..d53140f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot;
 import java.io.ByteArrayOutputStream;
 import java.io.PrintStream;
 
-
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.hdfs.AppendTestUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -29,8 +29,9 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import static org.hamcrest.CoreMatchers.is;
-import static org.junit.Assert.*;
-
+import static org.hamcrest.CoreMatchers.not;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThat;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -103,17 +104,35 @@ public class TestSnapshotFileLength {
     Path file1snap1
         = SnapshotTestHelper.getSnapshotPath(sub, snapshot1, file1Name);
 
+    final FileChecksum snapChksum1 = hdfs.getFileChecksum(file1snap1);
+    assertThat("file and snapshot file checksums are not equal",
+        hdfs.getFileChecksum(file1), is(snapChksum1));
+
     // Append to the file.
     FSDataOutputStream out = hdfs.append(file1);
+    // Nothing has been appended yet. All checksums should still be equal.
+    assertThat("file and snapshot checksums (open for append) are not equal",
+        hdfs.getFileChecksum(file1), is(snapChksum1));
+    assertThat("snapshot checksum (post-open for append) has changed",
+        hdfs.getFileChecksum(file1snap1), is(snapChksum1));
     try {
       AppendTestUtil.write(out, 0, toAppend);
       // Test reading from snapshot of file that is open for append
       byte[] dataFromSnapshot = DFSTestUtil.readFileBuffer(hdfs, file1snap1);
       assertThat("Wrong data size in snapshot.",
           dataFromSnapshot.length, is(origLen));
+      // Verify that checksum didn't change
+      assertThat("snapshot file checksum (pre-close) has changed",
+          hdfs.getFileChecksum(file1), is(snapChksum1));
+      assertThat("snapshot checksum (post-append) has changed",
+          hdfs.getFileChecksum(file1snap1), is(snapChksum1));
     } finally {
       out.close();
     }
+    assertThat("file and snapshot file checksums (post-close) are equal",
+        hdfs.getFileChecksum(file1), not(snapChksum1));
+    assertThat("snapshot file checksum (post-close) has changed",
+        hdfs.getFileChecksum(file1snap1), is(snapChksum1));
 
     // Make sure we can read the entire file via its non-snapshot path.
     fileStatus = hdfs.getFileStatus(file1);
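
The invariant this fix restores is worth stating directly: a snapshot's checksum must not change when the live file is later appended to, because getFileChecksum on a snapshot path now stops at the length recorded in the snapshot. A sketch of that invariant against the generic FileSystem API; the paths and class name are illustrative:

    import org.apache.hadoop.fs.FileChecksum;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class SnapshotChecksumInvariant {
      // Read the snapshot path before and after the live file grows; the
      // two checksums must be identical.
      static void check(FileSystem fs, Path live, Path snap) throws Exception {
        FileChecksum before = fs.getFileChecksum(snap);
        // ... append to 'live' via fs.append(live) and close the stream ...
        FileChecksum after = fs.getFileChecksum(snap);
        if (!before.equals(after)) {
          throw new AssertionError("snapshot checksum changed after append");
        }
      }
    }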


[45/50] [abbrv] hadoop git commit: Moving CHANGES.txt entry for MAPREDUCE-5657 to branch-2.

Posted by ji...@apache.org.
Moving CHANGES.txt entry for MAPREDUCE-5657 to branch-2.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f88e63e6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f88e63e6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f88e63e6

Branch: refs/heads/HDFS-7285
Commit: f88e63e61343d3888fb2a4324b2156f6716ed41e
Parents: 662781e
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Mon Mar 9 12:04:44 2015 +0900
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:17:55 2015 -0700

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f88e63e6/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index d0d8216..049b17d 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -181,9 +181,6 @@ Trunk (Unreleased)
     MAPREDUCE-6234. TestHighRamJob fails due to the change in MAPREDUCE-5785. 
     (Masatake Iwasaki via kasha)
 
-    MAPREDUCE-5657. Fix Javadoc errors caused by incorrect or illegal tags in doc
-    comments. (Akira AJISAKA via ozawa)
-
   BREAKDOWN OF MAPREDUCE-2841 (NATIVE TASK) SUBTASKS
 
     MAPREDUCE-5985. native-task: Fix build on macosx. Contributed by
@@ -417,6 +414,9 @@ Release 2.7.0 - UNRELEASED
     MAPREDUCE-6136. MRAppMaster doesn't shutdown file systems. (Brahma 
     Reddy Battula via ozawa)
 
+    MAPREDUCE-5657. Fix Javadoc errors caused by incorrect or illegal tags in doc
+    comments. (Akira AJISAKA and Andrew Purtell via ozawa)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES


[10/50] [abbrv] hadoop git commit: HDFS-6565. Use jackson instead jetty json in hdfs-client. Contributed by Akira AJISAKA.

Posted by ji...@apache.org.
HDFS-6565. Use jackson instead jetty json in hdfs-client. Contributed by Akira AJISAKA.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/046dc672
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/046dc672
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/046dc672

Branch: refs/heads/HDFS-7285
Commit: 046dc672e6206224f2b5ca3e7577540ef22febbf
Parents: bab6209c
Author: Haohui Mai <wh...@apache.org>
Authored: Tue Mar 3 17:54:13 2015 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:23 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../org/apache/hadoop/hdfs/web/JsonUtil.java    | 217 +++++++++----------
 .../hadoop/hdfs/web/WebHdfsFileSystem.java      |  21 +-
 .../apache/hadoop/hdfs/web/TestJsonUtil.java    |  22 +-
 4 files changed, 127 insertions(+), 136 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/046dc672/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 42430ef..4e7b919 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1077,6 +1077,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7757. Misleading error messages in FSImage.java. (Brahma Reddy Battula
     via Arpit Agarwal)
 
+    HDFS-6565. Use jackson instead jetty json in hdfs-client.
+    (Akira Ajisaka via wheat9)
+
     BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
       HDFS-7720. Quota by Storage Type API, tools and ClientNameNode
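
A note on the JsonUtil rewrite that follows: when Jackson binds untyped JSON into a Map, integral values come back as Integer or Long depending on magnitude, so the new code reads every numeric field through Number and widens explicitly instead of casting to Long. A minimal sketch of both halves of the pattern, assuming the org.codehaus.jackson (Jackson 1.x) artifact Hadoop used at the time; JacksonSketch is an illustrative name:

    import java.util.Collections;
    import java.util.Map;
    import org.codehaus.jackson.map.ObjectMapper;

    final class JacksonSketch {
      static void demo() throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        String json = mapper.writeValueAsString(
            Collections.singletonMap("length", 7));
        Map<?, ?> m = mapper.readValue(json, Map.class);
        // 7 fits in an int, so m.get("length") is an Integer here; a direct
        // (Long) cast would throw ClassCastException. Widen through Number.
        long len = ((Number) m.get("length")).longValue();
        System.out.println(len);
      }
    }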

http://git-wip-us.apache.org/repos/asf/hadoop/blob/046dc672/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index aa6100c..2e67848 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -21,7 +21,6 @@ import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.*;
@@ -35,7 +34,8 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.StringUtils;
-import org.mortbay.util.ajax.JSON;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.ObjectReader;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
@@ -95,59 +95,6 @@ public class JsonUtil {
     return (Token<BlockTokenIdentifier>)toToken(m);
   }
 
-  /** Convert a Token[] to a JSON array. */
-  private static Object[] toJsonArray(final Token<? extends TokenIdentifier>[] array
-      ) throws IOException {
-    if (array == null) {
-      return null;
-    } else if (array.length == 0) {
-      return EMPTY_OBJECT_ARRAY;
-    } else {
-      final Object[] a = new Object[array.length];
-      for(int i = 0; i < array.length; i++) {
-        a[i] = toJsonMap(array[i]);
-      }
-      return a;
-    }
-  }
-
-  /** Convert a token object to a JSON string. */
-  public static String toJsonString(final Token<? extends TokenIdentifier>[] tokens
-      ) throws IOException {
-    if (tokens == null) {
-      return null;
-    }
-
-    final Map<String, Object> m = new TreeMap<String, Object>();
-    m.put(Token.class.getSimpleName(), toJsonArray(tokens));
-    return toJsonString(Token.class.getSimpleName() + "s", m);
-  }
-
-  /** Convert an Object[] to a List<Token<?>>.  */
-  private static List<Token<?>> toTokenList(final Object[] objects) throws IOException {
-    if (objects == null) {
-      return null;
-    } else if (objects.length == 0) {
-      return Collections.emptyList();
-    } else {
-      final List<Token<?>> list = new ArrayList<Token<?>>(objects.length);
-      for(int i = 0; i < objects.length; i++) {
-        list.add(toToken((Map<?, ?>)objects[i]));
-      }
-      return list;
-    }
-  }
-
-  /** Convert a JSON map to a List<Token<?>>. */
-  public static List<Token<?>> toTokenList(final Map<?, ?> json) throws IOException {
-    if (json == null) {
-      return null;
-    }
-
-    final Map<?, ?> m = (Map<?, ?>)json.get(Token.class.getSimpleName() + "s");
-    return toTokenList((Object[])m.get(Token.class.getSimpleName()));
-  }
-
   /** Convert an exception object to a Json string. */
   public static String toJsonString(final Exception e) {
     final Map<String, Object> m = new TreeMap<String, Object>();
@@ -173,7 +120,12 @@ public class JsonUtil {
   public static String toJsonString(final String key, final Object value) {
     final Map<String, Object> m = new TreeMap<String, Object>();
     m.put(key, value);
-    return JSON.toString(m);
+    ObjectMapper mapper = new ObjectMapper();
+    try {
+      return mapper.writeValueAsString(m);
+    } catch (IOException ignored) {
+    }
+    return null;
   }
 
   /** Convert a FsPermission object to a string. */
@@ -233,7 +185,13 @@ public class JsonUtil {
     m.put("fileId", status.getFileId());
     m.put("childrenNum", status.getChildrenNum());
     m.put("storagePolicy", status.getStoragePolicy());
-    return includeType ? toJsonString(FileStatus.class, m): JSON.toString(m);
+    ObjectMapper mapper = new ObjectMapper();
+    try {
+      return includeType ?
+          toJsonString(FileStatus.class, m) : mapper.writeValueAsString(m);
+    } catch (IOException ignored) {
+    }
+    return null;
   }
 
   /** Convert a Json map to a HdfsFileStatus object. */
@@ -249,25 +207,23 @@ public class JsonUtil {
     final byte[] symlink = type != PathType.SYMLINK? null
         : DFSUtil.string2Bytes((String)m.get("symlink"));
 
-    final long len = (Long) m.get("length");
+    final long len = ((Number) m.get("length")).longValue();
     final String owner = (String) m.get("owner");
     final String group = (String) m.get("group");
     final FsPermission permission = toFsPermission((String) m.get("permission"),
       (Boolean)m.get("aclBit"), (Boolean)m.get("encBit"));
-    final long aTime = (Long) m.get("accessTime");
-    final long mTime = (Long) m.get("modificationTime");
-    final long blockSize = (Long) m.get("blockSize");
+    final long aTime = ((Number) m.get("accessTime")).longValue();
+    final long mTime = ((Number) m.get("modificationTime")).longValue();
+    final long blockSize = ((Number) m.get("blockSize")).longValue();
     final boolean isLazyPersist = m.containsKey("lazyPersist")
         ? (Boolean) m.get("lazyPersist") : false;
-    final short replication = (short) (long) (Long) m.get("replication");
-    final long fileId = m.containsKey("fileId") ? (Long) m.get("fileId")
-        : INodeId.GRANDFATHER_INODE_ID;
-    Long childrenNumLong = (Long) m.get("childrenNum");
-    final int childrenNum = (childrenNumLong == null) ? -1
-            : childrenNumLong.intValue();
+    final short replication = ((Number) m.get("replication")).shortValue();
+    final long fileId = m.containsKey("fileId") ?
+        ((Number) m.get("fileId")).longValue() : INodeId.GRANDFATHER_INODE_ID;
+    final int childrenNum = getInt(m, "childrenNum", -1);
     final byte storagePolicy = m.containsKey("storagePolicy") ?
-        (byte) (long) (Long) m.get("storagePolicy") :
-          BlockStoragePolicySuite.ID_UNSPECIFIED;
+        (byte) ((Number) m.get("storagePolicy")).longValue() :
+        BlockStoragePolicySuite.ID_UNSPECIFIED;
     return new HdfsFileStatus(len, type == PathType.DIRECTORY, replication,
         blockSize, mTime, aTime, permission, owner, group,
         symlink, DFSUtil.string2Bytes(localName), fileId, childrenNum, null,
@@ -295,9 +251,10 @@ public class JsonUtil {
     }
     
     final String blockPoolId = (String)m.get("blockPoolId");
-    final long blockId = (Long)m.get("blockId");
-    final long numBytes = (Long)m.get("numBytes");
-    final long generationStamp = (Long)m.get("generationStamp");
+    final long blockId = ((Number) m.get("blockId")).longValue();
+    final long numBytes = ((Number) m.get("numBytes")).longValue();
+    final long generationStamp =
+        ((Number) m.get("generationStamp")).longValue();
     return new ExtendedBlock(blockPoolId, blockId, numBytes, generationStamp);
   }
   
@@ -338,7 +295,7 @@ public class JsonUtil {
     if (value == null) {
       return defaultValue;
     }
-    return (int) (long) (Long) value;
+    return ((Number) value).intValue();
   }
 
   private static long getLong(Map<?, ?> m, String key, final long defaultValue) {
@@ -346,7 +303,7 @@ public class JsonUtil {
     if (value == null) {
       return defaultValue;
     }
-    return (Long) value;
+    return ((Number) value).longValue();
   }
 
   private static String getString(Map<?, ?> m, String key,
@@ -358,6 +315,15 @@ public class JsonUtil {
     return (String) value;
   }
 
+  static List<?> getList(Map<?, ?> m, String key) {
+    Object list = m.get(key);
+    if (list instanceof List<?>) {
+      return (List<?>) list;
+    } else {
+      return null;
+    }
+  }
+
   /** Convert a Json map to an DatanodeInfo object. */
   static DatanodeInfo toDatanodeInfo(final Map<?, ?> m) 
     throws IOException {
@@ -402,9 +368,9 @@ public class JsonUtil {
         (String)m.get("hostName"),
         (String)m.get("storageID"),
         xferPort,
-        (int)(long)(Long)m.get("infoPort"),
+        ((Number) m.get("infoPort")).intValue(),
         getInt(m, "infoSecurePort", 0),
-        (int)(long)(Long)m.get("ipcPort"),
+        ((Number) m.get("ipcPort")).intValue(),
 
         getLong(m, "capacity", 0l),
         getLong(m, "dfsUsed", 0l),
@@ -434,16 +400,17 @@ public class JsonUtil {
   }
 
   /** Convert an Object[] to a DatanodeInfo[]. */
-  private static DatanodeInfo[] toDatanodeInfoArray(final Object[] objects) 
+  private static DatanodeInfo[] toDatanodeInfoArray(final List<?> objects)
       throws IOException {
     if (objects == null) {
       return null;
-    } else if (objects.length == 0) {
+    } else if (objects.isEmpty()) {
       return EMPTY_DATANODE_INFO_ARRAY;
     } else {
-      final DatanodeInfo[] array = new DatanodeInfo[objects.length];
-      for(int i = 0; i < array.length; i++) {
-        array[i] = toDatanodeInfo((Map<?, ?>) objects[i]);
+      final DatanodeInfo[] array = new DatanodeInfo[objects.size()];
+      int i = 0;
+      for (Object object : objects) {
+        array[i++] = toDatanodeInfo((Map<?, ?>) object);
       }
       return array;
     }
@@ -474,11 +441,11 @@ public class JsonUtil {
 
     final ExtendedBlock b = toExtendedBlock((Map<?, ?>)m.get("block"));
     final DatanodeInfo[] locations = toDatanodeInfoArray(
-        (Object[])m.get("locations"));
-    final long startOffset = (Long)m.get("startOffset");
+        getList(m, "locations"));
+    final long startOffset = ((Number) m.get("startOffset")).longValue();
     final boolean isCorrupt = (Boolean)m.get("isCorrupt");
     final DatanodeInfo[] cachedLocations = toDatanodeInfoArray(
-        (Object[])m.get("cachedLocations"));
+        getList(m, "cachedLocations"));
 
     final LocatedBlock locatedblock = new LocatedBlock(b, locations,
         null, null, startOffset, isCorrupt, cachedLocations);
@@ -502,17 +469,17 @@ public class JsonUtil {
     }
   }
 
-  /** Convert an Object[] to a List of LocatedBlock. */
-  private static List<LocatedBlock> toLocatedBlockList(final Object[] objects
-      ) throws IOException {
+  /** Convert an List of Object to a List of LocatedBlock. */
+  private static List<LocatedBlock> toLocatedBlockList(
+      final List<?> objects) throws IOException {
     if (objects == null) {
       return null;
-    } else if (objects.length == 0) {
+    } else if (objects.isEmpty()) {
       return Collections.emptyList();
     } else {
-      final List<LocatedBlock> list = new ArrayList<LocatedBlock>(objects.length);
-      for(int i = 0; i < objects.length; i++) {
-        list.add(toLocatedBlock((Map<?, ?>)objects[i]));
+      final List<LocatedBlock> list = new ArrayList<>(objects.size());
+      for (Object object : objects) {
+        list.add(toLocatedBlock((Map<?, ?>) object));
       }
       return list;
     }
@@ -543,10 +510,10 @@ public class JsonUtil {
     }
 
     final Map<?, ?> m = (Map<?, ?>)json.get(LocatedBlocks.class.getSimpleName());
-    final long fileLength = (Long)m.get("fileLength");
+    final long fileLength = ((Number) m.get("fileLength")).longValue();
     final boolean isUnderConstruction = (Boolean)m.get("isUnderConstruction");
     final List<LocatedBlock> locatedBlocks = toLocatedBlockList(
-        (Object[])m.get("locatedBlocks"));
+        getList(m, "locatedBlocks"));
     final LocatedBlock lastLocatedBlock = toLocatedBlock(
         (Map<?, ?>)m.get("lastLocatedBlock"));
     final boolean isLastBlockComplete = (Boolean)m.get("isLastBlockComplete");
@@ -577,12 +544,12 @@ public class JsonUtil {
     }
 
     final Map<?, ?> m = (Map<?, ?>)json.get(ContentSummary.class.getSimpleName());
-    final long length = (Long)m.get("length");
-    final long fileCount = (Long)m.get("fileCount");
-    final long directoryCount = (Long)m.get("directoryCount");
-    final long quota = (Long)m.get("quota");
-    final long spaceConsumed = (Long)m.get("spaceConsumed");
-    final long spaceQuota = (Long)m.get("spaceQuota");
+    final long length = ((Number) m.get("length")).longValue();
+    final long fileCount = ((Number) m.get("fileCount")).longValue();
+    final long directoryCount = ((Number) m.get("directoryCount")).longValue();
+    final long quota = ((Number) m.get("quota")).longValue();
+    final long spaceConsumed = ((Number) m.get("spaceConsumed")).longValue();
+    final long spaceQuota = ((Number) m.get("spaceQuota")).longValue();
 
     return new ContentSummary(length, fileCount, directoryCount,
         quota, spaceConsumed, spaceQuota);
@@ -610,7 +577,7 @@ public class JsonUtil {
 
     final Map<?, ?> m = (Map<?, ?>)json.get(FileChecksum.class.getSimpleName());
     final String algorithm = (String)m.get("algorithm");
-    final int length = (int)(long)(Long)m.get("length");
+    final int length = ((Number) m.get("length")).intValue();
     final byte[] bytes = StringUtils.hexStringToByte((String)m.get("bytes"));
 
     final DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes));
@@ -654,7 +621,13 @@ public class JsonUtil {
     m.put("owner", status.getOwner());
     m.put("group", status.getGroup());
     m.put("stickyBit", status.isStickyBit());
-    m.put("entries", status.getEntries());
+
+    final List<String> stringEntries = new ArrayList<>();
+    for (AclEntry entry : status.getEntries()) {
+      stringEntries.add(entry.toString());
+    }
+    m.put("entries", stringEntries);
+
     FsPermission perm = status.getPermission();
     if (perm != null) {
       m.put("permission", toString(perm));
@@ -668,7 +641,13 @@ public class JsonUtil {
     final Map<String, Map<String, Object>> finalMap =
         new TreeMap<String, Map<String, Object>>();
     finalMap.put(AclStatus.class.getSimpleName(), m);
-    return JSON.toString(finalMap);
+
+    ObjectMapper mapper = new ObjectMapper();
+    try {
+      return mapper.writeValueAsString(finalMap);
+    } catch (IOException ignored) {
+    }
+    return null;
   }
 
   /** Convert a Json map to an AclStatus object. */
@@ -689,11 +668,11 @@ public class JsonUtil {
           (Boolean) m.get("aclBit"), (Boolean) m.get("encBit"));
       aclStatusBuilder.setPermission(permission);
     }
-    final Object[] entries = (Object[]) m.get("entries");
+    final List<?> entries = (List<?>) m.get("entries");
 
     List<AclEntry> aclEntryList = new ArrayList<AclEntry>();
-    for (int i = 0; i < entries.length; i++) {
-      AclEntry aclEntry = AclEntry.parseAclEntry((String) entries[i], true);
+    for (Object entry : entries) {
+      AclEntry aclEntry = AclEntry.parseAclEntry((String) entry, true);
       aclEntryList.add(aclEntry);
     }
     aclStatusBuilder.addEntries(aclEntryList);
@@ -732,7 +711,8 @@ public class JsonUtil {
       final XAttrCodec encoding) throws IOException {
     final Map<String, Object> finalMap = new TreeMap<String, Object>();
     finalMap.put("XAttrs", toJsonArray(xAttrs, encoding));
-    return JSON.toString(finalMap);
+    ObjectMapper mapper = new ObjectMapper();
+    return mapper.writeValueAsString(finalMap);
   }
   
   public static String toJsonString(final List<XAttr> xAttrs)
@@ -741,10 +721,11 @@ public class JsonUtil {
     for (XAttr xAttr : xAttrs) {
       names.add(XAttrHelper.getPrefixName(xAttr));
     }
-    String ret = JSON.toString(names);
+    ObjectMapper mapper = new ObjectMapper();
+    String ret = mapper.writeValueAsString(names);
     final Map<String, Object> finalMap = new TreeMap<String, Object>();
     finalMap.put("XAttrNames", ret);
-    return JSON.toString(finalMap);
+    return mapper.writeValueAsString(finalMap);
   }
 
   public static byte[] getXAttr(final Map<?, ?> json, final String name) 
@@ -760,14 +741,13 @@ public class JsonUtil {
     
     return null;
   }
-  
+
   public static Map<String, byte[]> toXAttrs(final Map<?, ?> json) 
       throws IOException {
     if (json == null) {
       return null;
     }
-    
-    return toXAttrMap((Object[])json.get("XAttrs"));
+    return toXAttrMap(getList(json, "XAttrs"));
   }
   
   public static List<String> toXAttrNames(final Map<?, ?> json)
@@ -777,26 +757,27 @@ public class JsonUtil {
     }
 
     final String namesInJson = (String) json.get("XAttrNames");
-    final Object[] xattrs = (Object[]) JSON.parse(namesInJson);
+    ObjectReader reader = new ObjectMapper().reader(List.class);
+    final List<Object> xattrs = reader.readValue(namesInJson);
     final List<String> names =
       Lists.newArrayListWithCapacity(json.keySet().size());
 
-    for (int i = 0; i < xattrs.length; i++) {
-        names.add((String) (xattrs[i]));
+    for (Object xattr : xattrs) {
+      names.add((String) xattr);
     }
     return names;
   }
 
-  private static Map<String, byte[]> toXAttrMap(final Object[] objects) 
+  private static Map<String, byte[]> toXAttrMap(final List<?> objects)
       throws IOException {
     if (objects == null) {
       return null;
-    } else if (objects.length == 0) {
+    } else if (objects.isEmpty()) {
       return Maps.newHashMap();
     } else {
       final Map<String, byte[]> xAttrs = Maps.newHashMap();
-      for(int i = 0; i < objects.length; i++) {
-        Map<?, ?> m = (Map<?, ?>) objects[i];
+      for (Object object : objects) {
+        Map<?, ?> m = (Map<?, ?>) object;
         String name = (String) m.get("name");
         String value = (String) m.get("value");
         xAttrs.put(name, decodeXAttrValue(value));
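
A note on the Number-based casts introduced above: the mortbay parser
represented every JSON integer as a Long (hence the old (int)(long)(Long)
chains), while Jackson picks the narrowest Java type, so small values arrive
as Integer and the old casts would throw ClassCastException. A minimal
standalone sketch of the difference (class name illustrative, not part of
the patch):

    import java.io.IOException;
    import java.util.Map;
    import org.codehaus.jackson.map.ObjectMapper;

    public class NumberCastSketch {
      public static void main(String[] args) throws IOException {
        ObjectMapper mapper = new ObjectMapper();
        Map<?, ?> m = mapper.readValue("{\"infoPort\":50070}", Map.class);
        Object v = m.get("infoPort");        // Integer here, not Long
        // int bad = (int) (long) (Long) v;  // ClassCastException with Jackson
        int port = ((Number) v).intValue();  // widens safely from any Number
        System.out.println(port);
      }
    }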

http://git-wip-us.apache.org/repos/asf/hadoop/blob/046dc672/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index a907404..739e701 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -22,7 +22,6 @@ import java.io.BufferedOutputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
-import java.io.InputStreamReader;
 import java.net.HttpURLConnection;
 import java.net.InetSocketAddress;
 import java.net.MalformedURLException;
@@ -81,10 +80,9 @@ import org.apache.hadoop.security.token.TokenSelector;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.StringUtils;
-import org.mortbay.util.ajax.JSON;
+import org.codehaus.jackson.map.ObjectMapper;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Charsets;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 
@@ -323,7 +321,8 @@ public class WebHdfsFileSystem extends FileSystem
               + "\" (parsed=\"" + parsed + "\")");
         }
       }
-      return (Map<?, ?>)JSON.parse(new InputStreamReader(in, Charsets.UTF_8));
+      ObjectMapper mapper = new ObjectMapper();
+      return mapper.reader(Map.class).readValue(in);
     } finally {
       in.close();
     }
@@ -1291,13 +1290,15 @@ public class WebHdfsFileSystem extends FileSystem
       @Override
       FileStatus[] decodeResponse(Map<?,?> json) {
         final Map<?, ?> rootmap = (Map<?, ?>)json.get(FileStatus.class.getSimpleName() + "es");
-        final Object[] array = (Object[])rootmap.get(FileStatus.class.getSimpleName());
+        final List<?> array = JsonUtil.getList(
+            rootmap, FileStatus.class.getSimpleName());
 
         //convert FileStatus
-        final FileStatus[] statuses = new FileStatus[array.length];
-        for (int i = 0; i < array.length; i++) {
-          final Map<?, ?> m = (Map<?, ?>)array[i];
-          statuses[i] = makeQualified(JsonUtil.toFileStatus(m, false), f);
+        final FileStatus[] statuses = new FileStatus[array.size()];
+        int i = 0;
+        for (Object object : array) {
+          final Map<?, ?> m = (Map<?, ?>) object;
+          statuses[i++] = makeQualified(JsonUtil.toFileStatus(m, false), f);
         }
         return statuses;
       }
@@ -1348,7 +1349,7 @@ public class WebHdfsFileSystem extends FileSystem
         new TokenArgumentParam(token.encodeToUrlString())) {
       @Override
       Long decodeResponse(Map<?,?> json) throws IOException {
-        return (Long) json.get("long");
+        return ((Number) json.get("long")).longValue();
       }
     }.run();
   }
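
The changes above replace mortbay's JSON.parse with a reusable ObjectReader
bound to Map.class, which can consume the response stream directly. The same
pattern in isolation, as a self-contained sketch with a canned byte stream
standing in for the HTTP connection (stream contents illustrative):

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.util.Map;
    import org.codehaus.jackson.map.ObjectMapper;

    public class ReadMapSketch {
      public static void main(String[] args) throws IOException {
        InputStream in =
            new ByteArrayInputStream("{\"long\":42}".getBytes("UTF-8"));
        Map<?, ?> json = new ObjectMapper().reader(Map.class).readValue(in);
        // decodeResponse-style extraction of the numeric field
        System.out.println(((Number) json.get("long")).longValue());
      }
    }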

http://git-wip-us.apache.org/repos/asf/hadoop/blob/046dc672/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
index 3eba7db..0ed38f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
@@ -42,9 +42,10 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.util.Time;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.ObjectReader;
 import org.junit.Assert;
 import org.junit.Test;
-import org.mortbay.util.ajax.JSON;
 
 import com.google.common.collect.Lists;
 
@@ -58,7 +59,7 @@ public class TestJsonUtil {
   }
 
   @Test
-  public void testHdfsFileStatus() {
+  public void testHdfsFileStatus() throws IOException {
     final long now = Time.now();
     final String parent = "/dir";
     final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
@@ -70,7 +71,9 @@ public class TestJsonUtil {
     System.out.println("fstatus = " + fstatus);
     final String json = JsonUtil.toJsonString(status, true);
     System.out.println("json    = " + json.replace(",", ",\n  "));
-    final HdfsFileStatus s2 = JsonUtil.toFileStatus((Map<?, ?>)JSON.parse(json), true);
+    ObjectReader reader = new ObjectMapper().reader(Map.class);
+    final HdfsFileStatus s2 =
+        JsonUtil.toFileStatus((Map<?, ?>) reader.readValue(json), true);
     final FileStatus fs2 = toFileStatus(s2, parent);
     System.out.println("s2      = " + s2);
     System.out.println("fs2     = " + fs2);
@@ -153,10 +156,11 @@ public class TestJsonUtil {
   }
   
   @Test
-  public void testToAclStatus() {
+  public void testToAclStatus() throws IOException {
     String jsonString =
         "{\"AclStatus\":{\"entries\":[\"user::rwx\",\"user:user1:rw-\",\"group::rw-\",\"other::r-x\"],\"group\":\"supergroup\",\"owner\":\"testuser\",\"stickyBit\":false}}";
-    Map<?, ?> json = (Map<?, ?>) JSON.parse(jsonString);
+    ObjectReader reader = new ObjectMapper().reader(Map.class);
+    Map<?, ?> json = reader.readValue(jsonString);
 
     List<AclEntry> aclSpec =
         Lists.newArrayList(aclEntry(ACCESS, USER, ALL),
@@ -215,7 +219,8 @@ public class TestJsonUtil {
     String jsonString = 
         "{\"XAttrs\":[{\"name\":\"user.a1\",\"value\":\"0x313233\"}," +
         "{\"name\":\"user.a2\",\"value\":\"0x313131\"}]}";
-    Map<?, ?> json = (Map<?, ?>)JSON.parse(jsonString);
+    ObjectReader reader = new ObjectMapper().reader(Map.class);
+    Map<?, ?> json = reader.readValue(jsonString);
     XAttr xAttr1 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).
         setName("a1").setValue(XAttrCodec.decodeValue("0x313233")).build();
     XAttr xAttr2 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).
@@ -240,8 +245,9 @@ public class TestJsonUtil {
     String jsonString = 
         "{\"XAttrs\":[{\"name\":\"user.a1\",\"value\":\"0x313233\"}," +
         "{\"name\":\"user.a2\",\"value\":\"0x313131\"}]}";
-    Map<?, ?> json = (Map<?, ?>) JSON.parse(jsonString);
-    
+    ObjectReader reader = new ObjectMapper().reader(Map.class);
+    Map<?, ?> json = reader.readValue(jsonString);
+
     // Get xattr: user.a2
     byte[] value = JsonUtil.getXAttr(json, "user.a2");
     Assert.assertArrayEquals(XAttrCodec.decodeValue("0x313131"), value);


[04/50] [abbrv] hadoop git commit: HDFS-7757. Misleading error messages in FSImage.java. (Contributed by Brahma Reddy Battula)

Posted by ji...@apache.org.
HDFS-7757. Misleading error messages in FSImage.java. (Contributed by Brahma Reddy Battula)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6bc27985
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6bc27985
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6bc27985

Branch: refs/heads/HDFS-7285
Commit: 6bc27985a305a768d10f834ad8d90616cffdbcf6
Parents: 68c9b55
Author: Arpit Agarwal <ar...@apache.org>
Authored: Tue Mar 3 10:55:22 2015 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:22 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                    | 3 +++
 .../java/org/apache/hadoop/hdfs/server/namenode/FSImage.java   | 6 +++---
 2 files changed, 6 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bc27985/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fe78097..42430ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1074,6 +1074,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7871. NameNodeEditLogRoller can keep printing "Swallowing exception"
     message. (jing9)
 
+    HDFS-7757. Misleading error messages in FSImage.java. (Brahma Reddy Battula
+    via Arpit Agarwal)
+
     BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
       HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bc27985/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index 44c41d0..e589eea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -883,7 +883,7 @@ public class FSImage implements Closeable {
       final long namespace = counts.getNameSpace() - parentNamespace;
       final long nsQuota = q.getNameSpace();
       if (Quota.isViolated(nsQuota, namespace)) {
-        LOG.error("BUG: Namespace quota violation in image for "
+        LOG.warn("Namespace quota violation in image for "
             + dir.getFullPathName()
             + " quota = " + nsQuota + " < consumed = " + namespace);
       }
@@ -891,7 +891,7 @@ public class FSImage implements Closeable {
       final long ssConsumed = counts.getStorageSpace() - parentStoragespace;
       final long ssQuota = q.getStorageSpace();
       if (Quota.isViolated(ssQuota, ssConsumed)) {
-        LOG.error("BUG: Storagespace quota violation in image for "
+        LOG.warn("Storagespace quota violation in image for "
             + dir.getFullPathName()
             + " quota = " + ssQuota + " < consumed = " + ssConsumed);
       }
@@ -903,7 +903,7 @@ public class FSImage implements Closeable {
             parentTypeSpaces.get(t);
         final long typeQuota = q.getTypeSpaces().get(t);
         if (Quota.isViolated(typeQuota, typeSpace)) {
-          LOG.error("BUG: Storage type quota violation in image for "
+          LOG.warn("Storage type quota violation in image for "
               + dir.getFullPathName()
               + " type = " + t.toString() + " quota = "
               + typeQuota + " < consumed " + typeSpace);


[19/50] [abbrv] hadoop git commit: HDFS-7746. Add a test randomly mixing append, truncate and snapshot operations.

Posted by ji...@apache.org.
HDFS-7746. Add a test randomly mixing append, truncate and snapshot operations.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/27f89818
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/27f89818
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/27f89818

Branch: refs/heads/HDFS-7285
Commit: 27f89818eee5084ffd475cadc42b76f2c32a747b
Parents: d138804
Author: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Authored: Thu Mar 5 10:21:29 2015 +0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:24 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../hadoop/hdfs/TestAppendSnapshotTruncate.java | 478 +++++++++++++++++++
 2 files changed, 481 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/27f89818/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d9008d9..f9541e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -709,6 +709,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-1522. Combine two BLOCK_FILE_PREFIX constants into one.
     (Dongming Liang via shv)
 
+    HDFS-7746. Add a test randomly mixing append, truncate and snapshot
+    operations. (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/27f89818/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
new file mode 100644
index 0000000..5c4c7b4
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
@@ -0,0 +1,478 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import java.io.File;
+import java.io.FileFilter;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.Callable;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.TestFileTruncate;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.log4j.Level;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Test randomly mixing append, snapshot and truncate operations.
+ * Use the local file system to simulate each operation and verify
+ * the correctness.
+ */
+public class TestAppendSnapshotTruncate {
+  static {
+    GenericTestUtils.setLogLevel(NameNode.stateChangeLog, Level.ALL);
+  }
+  private static final Log LOG = LogFactory.getLog(TestAppendSnapshotTruncate.class);
+  private static final int BLOCK_SIZE = 1024;
+  private static final int DATANODE_NUM = 3;
+  private static final short REPLICATION = 3;
+
+  static final int SHORT_HEARTBEAT = 1;
+  static final String[] EMPTY_STRINGS = {};
+
+  static Configuration conf;
+  static MiniDFSCluster cluster;
+  static DistributedFileSystem dfs;
+
+  @BeforeClass
+  public static void startUp() throws IOException {
+    conf = new HdfsConfiguration();
+    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, BLOCK_SIZE);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BLOCK_SIZE);
+    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, SHORT_HEARTBEAT);
+    conf.setLong(
+        DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
+    cluster = new MiniDFSCluster.Builder(conf)
+        .format(true)
+        .numDataNodes(DATANODE_NUM)
+        .nameNodePort(NameNode.DEFAULT_PORT)
+        .waitSafeMode(true)
+        .build();
+    dfs = cluster.getFileSystem();
+  }
+
+  @AfterClass
+  public static void tearDown() throws IOException {
+    if(dfs != null) {
+      dfs.close();
+    }
+    if(cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+
+  /** Test randomly mixing append, snapshot and truncate operations. */
+  @Test
+  public void testAST() throws Exception {
+    final String dirPathString = "/dir";
+    final Path dir = new Path(dirPathString);
+    dfs.mkdirs(dir);
+    dfs.allowSnapshot(dir);
+
+    final File localDir = new File(
+        System.getProperty("test.build.data", "target/test/data")
+        + dirPathString);
+    if (localDir.exists()) {
+      FileUtil.fullyDelete(localDir);
+    }
+    localDir.mkdirs();
+
+    final DirWorker w = new DirWorker(dir, localDir, 3);
+    w.startAllFiles();
+    w.start();
+    Worker.sleep(10L*1000);
+    w.stop();
+    w.stopAllFiles();
+    w.checkEverything();
+  }
+
+  static final FileFilter FILE_ONLY = new FileFilter() {
+    @Override
+    public boolean accept(File f) {
+      return f.isFile();
+    }
+  };
+
+  static class DirWorker extends Worker {
+    final Path dir;
+    final File localDir;
+    
+    final FileWorker[] files;
+
+    private Map<String, Path> snapshotPaths = new HashMap<String, Path>();
+    private AtomicInteger snapshotCount = new AtomicInteger();
+
+    DirWorker(Path dir, File localDir, int nFiles) throws IOException {
+      super(dir.getName());
+      this.dir = dir;
+      this.localDir = localDir;
+
+      this.files = new FileWorker[nFiles];
+      for(int i = 0; i < files.length; i++) {
+        files[i] = new FileWorker(dir, localDir, String.format("file%02d", i));
+      }
+    }
+
+    static String getSnapshotName(int n) {
+      return String.format("s%02d", n);
+    }
+
+    String createSnapshot(String snapshot) throws IOException {
+      final StringBuilder b = new StringBuilder("createSnapshot: ")
+          .append(snapshot).append(" for ").append(dir);
+
+      {
+        //copy all local files to a sub dir to simulate snapshot. 
+        final File subDir = new File(localDir, snapshot);
+        Assert.assertFalse(subDir.exists());
+        subDir.mkdir();
+
+        for(File f : localDir.listFiles(FILE_ONLY)) {
+          FileUtils.copyFile(f, new File(subDir, f.getName()));
+        }
+      }
+      
+      final Path p = dfs.createSnapshot(dir, snapshot);
+      snapshotPaths.put(snapshot, p);
+      return b.toString();
+    }
+
+    String checkSnapshot(String snapshot) throws IOException {
+      final StringBuilder b = new StringBuilder("checkSnapshot: ")
+          .append(snapshot);
+
+      final File subDir = new File(localDir, snapshot);
+      Assert.assertTrue(subDir.exists());
+      
+      final File[] localFiles = subDir.listFiles(FILE_ONLY);
+      final Path p = snapshotPaths.get(snapshot);
+      final FileStatus[] statuses = dfs.listStatus(p);
+      Assert.assertEquals(localFiles.length, statuses.length);
+      b.append(p).append(" vs ").append(subDir).append(", ")
+       .append(statuses.length).append(" entries");
+      
+      Arrays.sort(localFiles);
+      Arrays.sort(statuses);
+      for(int i = 0; i < statuses.length; i++) {
+        FileWorker.checkFullFile(statuses[i].getPath(), localFiles[i]);
+      }
+      return b.toString();
+    }
+
+    String deleteSnapshot(String snapshot) throws IOException {
+      final StringBuilder b = new StringBuilder("deleteSnapshot: ")
+          .append(snapshot).append(" from ").append(dir);
+      FileUtil.fullyDelete(new File(localDir, snapshot));
+      dfs.deleteSnapshot(dir, snapshot);
+      snapshotPaths.remove(snapshot);
+      return b.toString();
+    }
+
+    
+    @Override
+    public String call() throws Exception {
+      final Random r = DFSUtil.getRandom();
+      final int op = r.nextInt(6);
+      if (op <= 1) {
+        pauseAllFiles();
+        try {
+          final String snapshot = getSnapshotName(snapshotCount.getAndIncrement());
+          return createSnapshot(snapshot);
+        } finally {
+          startAllFiles();
+        }
+      } else if (op <= 3) {
+        final String[] keys = snapshotPaths.keySet().toArray(EMPTY_STRINGS);
+        if (keys.length == 0) {
+          return "NO-OP";
+        }
+        final String snapshot = keys[r.nextInt(keys.length)];
+        final String s = checkSnapshot(snapshot);
+        
+        if (op == 2) {
+          return deleteSnapshot(snapshot);
+        }
+        return s;
+      } else {
+        return "NO-OP";
+      }
+    }
+
+    void pauseAllFiles() {
+      for(FileWorker f : files) { 
+        f.pause();
+      }
+
+      for(int i = 0; i < files.length; ) {
+        sleep(100);
+        for(; i < files.length && files[i].isPaused(); i++);
+      }
+    }
+    
+    void startAllFiles() {
+      for(FileWorker f : files) { 
+        f.start();
+      }
+    }
+    
+    void stopAllFiles() throws InterruptedException {
+      for(FileWorker f : files) { 
+        f.stop();
+      }
+    }
+
+    void checkEverything() throws IOException {
+      LOG.info("checkEverything");
+      for(FileWorker f : files) { 
+        f.checkFullFile();
+        Preconditions.checkState(f.state.get() != State.ERROR);
+      }
+      for(String snapshot : snapshotPaths.keySet()) {
+        checkSnapshot(snapshot);
+      }
+      Preconditions.checkState(state.get() != State.ERROR);
+    }
+  }
+
+  static class FileWorker extends Worker {
+    final Path file;
+    final File localFile;
+
+    FileWorker(Path dir, File localDir, String filename) throws IOException {
+      super(filename);
+      this.file = new Path(dir, filename);
+      this.localFile = new File(localDir, filename);
+
+      localFile.createNewFile();
+      dfs.create(file, false, 4096, REPLICATION, BLOCK_SIZE).close();
+    }
+
+    @Override
+    public String call() throws IOException {
+      final Random r = DFSUtil.getRandom();
+      final int op = r.nextInt(9);
+      if (op == 0) {
+        return checkFullFile();
+      } else {
+        final int nBlocks = r.nextInt(4) + 1;
+        final int lastBlockSize = r.nextInt(BLOCK_SIZE) + 1;
+        final int nBytes = nBlocks*BLOCK_SIZE + lastBlockSize;
+
+        if (op <= 4) {
+          return append(nBytes);
+        } else if (op <= 6) {
+          return truncateArbitrarily(nBytes);
+        } else {
+          return truncateToBlockBoundary(nBlocks);
+        }
+      }
+    }
+
+    String append(int n) throws IOException {
+      final StringBuilder b = new StringBuilder("append ")
+          .append(n).append(" bytes to ").append(file.getName());
+
+      final byte[] bytes = new byte[n];
+      DFSUtil.getRandom().nextBytes(bytes);
+      
+      { // write to local file
+        final FileOutputStream out = new FileOutputStream(localFile, true);
+        out.write(bytes, 0, bytes.length);
+        out.close();
+      }
+
+      {
+        final FSDataOutputStream out = dfs.append(file);
+        out.write(bytes, 0, bytes.length);
+        out.close();
+      }
+      return b.toString();
+    }
+    
+    String truncateArbitrarily(int nBytes) throws IOException {
+      Preconditions.checkArgument(nBytes > 0);
+      final int length = checkLength();
+      final StringBuilder b = new StringBuilder("truncateArbitrarily: ")
+          .append(nBytes).append(" bytes from ").append(file.getName())
+          .append(", length=" + length);
+
+      truncate(length > nBytes? length - nBytes: 0, b);
+      return b.toString();
+    }
+
+    String truncateToBlockBoundary(int nBlocks) throws IOException {
+      Preconditions.checkArgument(nBlocks > 0);
+      final int length = checkLength();
+      final StringBuilder b = new StringBuilder("truncateToBlockBoundary: ")
+          .append(nBlocks).append(" blocks from ").append(file.getName())
+          .append(", length=" + length);
+      final int n =  (nBlocks - 1)*BLOCK_SIZE + (length%BLOCK_SIZE);
+      Preconditions.checkState(truncate(length > n? length - n: 0, b), b);
+      return b.toString();
+    }
+
+    private boolean truncate(long newLength, StringBuilder b) throws IOException {
+      final RandomAccessFile raf = new RandomAccessFile(localFile, "rw");
+      raf.setLength(newLength);
+      raf.close();
+
+      final boolean isReady = dfs.truncate(file, newLength);
+      b.append(", newLength=").append(newLength)
+       .append(", isReady=").append(isReady);
+      if (!isReady) {
+        TestFileTruncate.checkBlockRecovery(file, dfs);
+      }
+      return isReady;
+    }
+    
+    int checkLength() throws IOException {
+      return checkLength(file, localFile);
+    }
+
+    static int checkLength(Path file, File localFile) throws IOException {
+      final long length = dfs.getFileStatus(file).getLen();
+      Assert.assertEquals(localFile.length(), length);
+      Assert.assertTrue(length <= Integer.MAX_VALUE);
+      return (int)length;
+    }
+    
+    String checkFullFile() throws IOException {
+      return checkFullFile(file, localFile);
+    }
+
+    static String checkFullFile(Path file, File localFile) throws IOException {
+      final StringBuilder b = new StringBuilder("checkFullFile: ")
+          .append(file.getName()).append(" vs ").append(localFile);
+      final byte[] bytes = new byte[checkLength(file, localFile)];
+      b.append(", length=").append(bytes.length);
+      
+      final FileInputStream in = new FileInputStream(localFile); 
+      for(int n = 0; n < bytes.length; ) {
+        n += in.read(bytes, n, bytes.length - n);
+      }
+      in.close();
+      
+      AppendTestUtil.checkFullFile(dfs, file, bytes.length, bytes,
+          "File content mismatch: " + b, false);
+      return b.toString();
+    }
+  }
+  
+  static abstract class Worker implements Callable<String> {
+    enum State {
+      IDLE(false), RUNNING(false), STOPPED(true), ERROR(true);
+      
+      final boolean isTerminated;
+      State(boolean isTerminated) {
+        this.isTerminated = isTerminated;
+      }
+    };
+
+    final String name;
+    final AtomicReference<State> state = new AtomicReference<State>(State.IDLE);
+    final AtomicBoolean isCalling = new AtomicBoolean();
+    final AtomicReference<Thread> thread = new AtomicReference<Thread>();
+    
+    Worker(String name) {
+      this.name = name;
+    }
+
+    void start() {
+      Preconditions.checkState(state.compareAndSet(State.IDLE, State.RUNNING));
+      
+      if (thread.get() == null) {
+        final Thread t = new Thread(null, new Runnable() {
+          @Override
+          public void run() {
+            final Random r = DFSUtil.getRandom();
+            for(State s; (s = state.get()) == State.RUNNING || s == State.IDLE;) {
+              if (s == State.RUNNING) {
+                isCalling.set(true);
+                try {
+                  LOG.info(call());
+                } catch (Exception e) {
+                  LOG.error("Worker " + name + " failed.", e);
+                  state.set(State.ERROR);
+                  return;
+                }
+                isCalling.set(false);
+              }
+              sleep(r.nextInt(100) + 50);
+            }
+          }
+        }, name);
+        Preconditions.checkState(thread.compareAndSet(null, t));
+        t.start();
+      }
+    }
+
+    boolean isPaused() {
+      return state.get() == State.IDLE && !isCalling.get();
+    }
+
+    void pause() {
+      Preconditions.checkState(state.compareAndSet(State.RUNNING, State.IDLE));
+    }
+
+    void stop() throws InterruptedException {
+      if (state.get() == State.ERROR) {
+        return;
+      }
+
+      state.set(State.STOPPED);
+      thread.get().join();
+    }
+
+    static void sleep(final long sleepTimeMs) {
+      try {
+        Thread.sleep(sleepTimeMs);
+      } catch (InterruptedException e) {
+        throw new RuntimeException(e);
+      }
+    }
+  }
+}
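
Two of the worker's operations deserve a gloss. truncateToBlockBoundary
trims to an exact block boundary: with BLOCK_SIZE = 1024, a file of length
5000 (four full blocks plus 904 bytes) and nBlocks = 2 gives
n = 1*1024 + 904 = 1928, so the new length is 5000 - 1928 = 3072 = 3*1024.
The private truncate helper then encodes the HDFS truncate contract:
DistributedFileSystem#truncate returns true only when the new length already
sits on a block boundary; otherwise the last block first goes through
recovery. A minimal sketch of that contract, assuming the test's imports and
an initialized DistributedFileSystem (the helper name is illustrative):

    static void truncateAndWait(DistributedFileSystem dfs, Path file,
        long newLength) throws Exception {
      final boolean isReady = dfs.truncate(file, newLength);
      if (!isReady) {
        // mirror the test: poll until the recovered last block is finalized
        TestFileTruncate.checkBlockRecovery(file, dfs);
      }
    }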


[03/50] [abbrv] hadoop git commit: YARN-3272. Surface container locality info in RM web UI (Jian He via wangda)

Posted by ji...@apache.org.
YARN-3272. Surface container locality info in RM web UI (Jian He via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4006739a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4006739a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4006739a

Branch: refs/heads/HDFS-7285
Commit: 4006739a2883ccc26d7c1af837d989bc529eb50d
Parents: 6bc2798
Author: Wangda Tan <wa...@apache.org>
Authored: Tue Mar 3 11:49:01 2015 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:22 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 +
 .../dev-support/findbugs-exclude.xml            |  7 ++
 .../rmapp/attempt/RMAppAttemptMetrics.java      | 21 ++++-
 .../resourcemanager/scheduler/NodeType.java     |  9 +-
 .../scheduler/SchedulerApplicationAttempt.java  | 15 +++-
 .../scheduler/capacity/LeafQueue.java           | 95 +++++++++++++-------
 .../server/resourcemanager/webapp/AppBlock.java | 45 +++++++++-
 .../scheduler/capacity/TestReservations.java    |  8 +-
 8 files changed, 163 insertions(+), 40 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4006739a/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0850f0b..5eaf4f4 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -348,6 +348,9 @@ Release 2.7.0 - UNRELEASED
     YARN-3281. Added RMStateStore to StateMachine visualization list.
     (Chengbing Liu via jianhe)
 
+    YARN-3272. Surface container locality info in RM web UI.
+    (Jian He via wangda)
+
   OPTIMIZATIONS
 
     YARN-2990. FairScheduler's delay-scheduling always waits for node-local and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4006739a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 70f1a71..1c3f201 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -63,6 +63,13 @@
     <Bug pattern="BC_UNCONFIRMED_CAST" />
   </Match>
   <Match>
+    <Class name="~org\.apache\.hadoop\.yarn\.server\.resourcemanager\.rmapp\.attempt\.RMAppAttemptMetrics" />
+    <Method name="getLocalityStatistics" />
+    <Bug pattern="EI_EXPOSE_REP" />
+    <Method name="incNumAllocatedContainers"/>
+    <Bug pattern="VO_VOLATILE_INCREMENT" />
+  </Match>
+  <Match>
     <Class name="org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl$AppRejectedTransition" />
     <Bug pattern="BC_UNCONFIRMED_CAST" />
   </Match>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4006739a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
index 0e60fd5..bc22073 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
 public class RMAppAttemptMetrics {
@@ -49,6 +50,10 @@ public class RMAppAttemptMetrics {
   private AtomicLong finishedVcoreSeconds = new AtomicLong(0);
   private RMContext rmContext;
 
+  private int[][] localityStatistics =
+      new int[NodeType.values().length][NodeType.values().length];
+  private volatile int totalAllocatedContainers;
+
   public RMAppAttemptMetrics(ApplicationAttemptId attemptId,
       RMContext rmContext) {
     this.attemptId = attemptId;
@@ -57,7 +62,7 @@ public class RMAppAttemptMetrics {
     this.writeLock = lock.writeLock();
     this.rmContext = rmContext;
   }
-  
+
   public void updatePreemptionInfo(Resource resource, RMContainer container) {
     try {
       writeLock.lock();
@@ -126,4 +131,18 @@ public class RMAppAttemptMetrics {
     this.finishedMemorySeconds.addAndGet(finishedMemorySeconds);
     this.finishedVcoreSeconds.addAndGet(finishedVcoreSeconds);
   }
+
+  public void incNumAllocatedContainers(NodeType containerType,
+      NodeType requestType) {
+    localityStatistics[containerType.index][requestType.index]++;
+    totalAllocatedContainers++;
+  }
+
+  public int[][] getLocalityStatistics() {
+    return this.localityStatistics;
+  }
+
+  public int getTotalAllocatedContainers() {
+    return this.totalAllocatedContainers;
+  }
 }
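
The VO_VOLATILE_INCREMENT suppression added to findbugs-exclude.xml is needed
because totalAllocatedContainers++ on a volatile field is a non-atomic
read-modify-write; since the counter only feeds the web UI, a rarely lost
update is tolerable. For comparison, a sketch of the stricter alternative
(not what this patch does):

    import java.util.concurrent.atomic.AtomicInteger;

    // an AtomicInteger makes the increment atomic and would not need
    // the findbugs suppression
    private final AtomicInteger totalAllocatedContainers = new AtomicInteger();

    public void incNumAllocatedContainers(NodeType containerType,
        NodeType requestType) {
      localityStatistics[containerType.index][requestType.index]++;
      totalAllocatedContainers.incrementAndGet();
    }

    public int getTotalAllocatedContainers() {
      return totalAllocatedContainers.get();
    }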

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4006739a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/NodeType.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/NodeType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/NodeType.java
index 821ec24..2b193bb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/NodeType.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/NodeType.java
@@ -22,7 +22,10 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
  * Resource classification.
  */
 public enum NodeType {
-  NODE_LOCAL,
-  RACK_LOCAL,
-  OFF_SWITCH
+  NODE_LOCAL(0), RACK_LOCAL(1), OFF_SWITCH(2);
+  public int index;
+
+  private NodeType(int index) {
+    this.index = index;
+  }
 }
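
The explicit index makes the enum usable as a subscript into the locality
matrix introduced in RMAppAttemptMetrics: rows are the allocated container's
locality, columns the locality of the request it satisfied. An illustrative
fragment:

    int[][] stats =
        new int[NodeType.values().length][NodeType.values().length];
    // one rack-local container allocated against a node-local request
    stats[NodeType.RACK_LOCAL.index][NodeType.NODE_LOCAL.index]++;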

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4006739a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index 532df05..ed78097 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AggregateAppResourceUsage;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEvent;
@@ -78,7 +79,7 @@ public class SchedulerApplicationAttempt {
   private long lastVcoreSeconds = 0;
 
   protected final AppSchedulingInfo appSchedulingInfo;
-  
+  protected ApplicationAttemptId attemptId;
   protected Map<ContainerId, RMContainer> liveContainers =
       new HashMap<ContainerId, RMContainer>();
   protected final Map<Priority, Map<NodeId, RMContainer>> reservedContainers = 
@@ -132,6 +133,7 @@ public class SchedulerApplicationAttempt {
             activeUsersManager, rmContext.getEpoch());
     this.queue = queue;
     this.pendingRelease = new HashSet<ContainerId>();
+    this.attemptId = applicationAttemptId;
     if (rmContext.getRMApps() != null &&
         rmContext.getRMApps()
             .containsKey(applicationAttemptId.getApplicationId())) {
@@ -619,4 +621,15 @@ public class SchedulerApplicationAttempt {
     // schedulingOpportunities
     // lastScheduledContainer
   }
+
+  public void incNumAllocatedContainers(NodeType containerType,
+      NodeType requestType) {
+    RMAppAttempt attempt =
+        rmContext.getRMApps().get(attemptId.getApplicationId())
+          .getCurrentAppAttempt();
+    if (attempt != null) {
+      attempt.getRMAppAttemptMetrics().incNumAllocatedContainers(containerType,
+        requestType);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4006739a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index 3910ac8..a607a62 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -32,6 +32,7 @@ import java.util.Set;
 import java.util.TreeSet;
 
 import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang.mutable.MutableObject;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -1242,15 +1243,25 @@ public class LeafQueue extends AbstractCSQueue {
       RMContainer reservedContainer, boolean needToUnreserve) {
     Resource assigned = Resources.none();
 
+    NodeType requestType = null;
+    MutableObject allocatedContainer = new MutableObject();
     // Data-local
     ResourceRequest nodeLocalResourceRequest =
         application.getResourceRequest(priority, node.getNodeName());
     if (nodeLocalResourceRequest != null) {
-      assigned = 
-          assignNodeLocalContainers(clusterResource, nodeLocalResourceRequest,
-              node, application, priority, reservedContainer, needToUnreserve); 
-      if (Resources.greaterThan(resourceCalculator, clusterResource, 
+      requestType = NodeType.NODE_LOCAL;
+      assigned =
+          assignNodeLocalContainers(clusterResource, nodeLocalResourceRequest, 
+            node, application, priority, reservedContainer, needToUnreserve,
+            allocatedContainer);
+      if (Resources.greaterThan(resourceCalculator, clusterResource,
           assigned, Resources.none())) {
+
+        //update locality statistics
+        if (allocatedContainer.getValue() != null) {
+          application.incNumAllocatedContainers(NodeType.NODE_LOCAL,
+            requestType);
+        }
         return new CSAssignment(assigned, NodeType.NODE_LOCAL);
       }
     }
@@ -1262,12 +1273,23 @@ public class LeafQueue extends AbstractCSQueue {
       if (!rackLocalResourceRequest.getRelaxLocality()) {
         return SKIP_ASSIGNMENT;
       }
-      
-      assigned =
-          assignRackLocalContainers(clusterResource, rackLocalResourceRequest,
-              node, application, priority, reservedContainer, needToUnreserve);
-      if (Resources.greaterThan(resourceCalculator, clusterResource, 
+
+      if (requestType != NodeType.NODE_LOCAL) {
+        requestType = NodeType.RACK_LOCAL;
+      }
+
+      assigned = 
+          assignRackLocalContainers(clusterResource, rackLocalResourceRequest, 
+            node, application, priority, reservedContainer, needToUnreserve,
+            allocatedContainer);
+      if (Resources.greaterThan(resourceCalculator, clusterResource,
           assigned, Resources.none())) {
+
+        //update locality statistics
+        if (allocatedContainer.getValue() != null) {
+          application.incNumAllocatedContainers(NodeType.RACK_LOCAL,
+            requestType);
+        }
         return new CSAssignment(assigned, NodeType.RACK_LOCAL);
       }
     }
@@ -1279,11 +1301,21 @@ public class LeafQueue extends AbstractCSQueue {
       if (!offSwitchResourceRequest.getRelaxLocality()) {
         return SKIP_ASSIGNMENT;
       }
+      if (requestType != NodeType.NODE_LOCAL
+          && requestType != NodeType.RACK_LOCAL) {
+        requestType = NodeType.OFF_SWITCH;
+      }
+
+      assigned =
+          assignOffSwitchContainers(clusterResource, offSwitchResourceRequest,
+            node, application, priority, reservedContainer, needToUnreserve,
+            allocatedContainer);
 
-      return new CSAssignment(assignOffSwitchContainers(clusterResource,
-          offSwitchResourceRequest, node, application, priority,
-          reservedContainer, needToUnreserve),
-          NodeType.OFF_SWITCH);
+      // update locality statistics
+      if (allocatedContainer.getValue() != null) {
+        application.incNumAllocatedContainers(NodeType.OFF_SWITCH, requestType);
+      }
+      return new CSAssignment(assigned, NodeType.OFF_SWITCH);
     }
     
     return SKIP_ASSIGNMENT;
@@ -1370,40 +1402,43 @@ public class LeafQueue extends AbstractCSQueue {
   private Resource assignNodeLocalContainers(Resource clusterResource,
       ResourceRequest nodeLocalResourceRequest, FiCaSchedulerNode node,
       FiCaSchedulerApp application, Priority priority,
-      RMContainer reservedContainer, boolean needToUnreserve) {
-    if (canAssign(application, priority, node, NodeType.NODE_LOCAL,
+      RMContainer reservedContainer, boolean needToUnreserve,
+      MutableObject allocatedContainer) {
+    if (canAssign(application, priority, node, NodeType.NODE_LOCAL, 
         reservedContainer)) {
       return assignContainer(clusterResource, node, application, priority,
           nodeLocalResourceRequest, NodeType.NODE_LOCAL, reservedContainer,
-          needToUnreserve);
+          needToUnreserve, allocatedContainer);
     }
     
     return Resources.none();
   }
 
-  private Resource assignRackLocalContainers(Resource clusterResource,
-      ResourceRequest rackLocalResourceRequest, FiCaSchedulerNode node,
-      FiCaSchedulerApp application, Priority priority,
-      RMContainer reservedContainer, boolean needToUnreserve) {
-    if (canAssign(application, priority, node, NodeType.RACK_LOCAL, 
+  private Resource assignRackLocalContainers(
+      Resource clusterResource, ResourceRequest rackLocalResourceRequest,  
+      FiCaSchedulerNode node, FiCaSchedulerApp application, Priority priority,
+      RMContainer reservedContainer, boolean needToUnreserve,
+      MutableObject allocatedContainer) {
+    if (canAssign(application, priority, node, NodeType.RACK_LOCAL,
         reservedContainer)) {
       return assignContainer(clusterResource, node, application, priority,
           rackLocalResourceRequest, NodeType.RACK_LOCAL, reservedContainer,
-          needToUnreserve);
+          needToUnreserve, allocatedContainer);
     }
     
     return Resources.none();
   }
 
-  private Resource assignOffSwitchContainers(Resource clusterResource,
-      ResourceRequest offSwitchResourceRequest, FiCaSchedulerNode node,
-      FiCaSchedulerApp application, Priority priority,
-      RMContainer reservedContainer, boolean needToUnreserve) {
-    if (canAssign(application, priority, node, NodeType.OFF_SWITCH, 
+  private Resource assignOffSwitchContainers(
+      Resource clusterResource, ResourceRequest offSwitchResourceRequest,
+      FiCaSchedulerNode node, FiCaSchedulerApp application, Priority priority, 
+      RMContainer reservedContainer, boolean needToUnreserve,
+      MutableObject allocatedContainer) {
+    if (canAssign(application, priority, node, NodeType.OFF_SWITCH,
         reservedContainer)) {
       return assignContainer(clusterResource, node, application, priority,
           offSwitchResourceRequest, NodeType.OFF_SWITCH, reservedContainer,
-          needToUnreserve);
+          needToUnreserve, allocatedContainer);
     }
     
     return Resources.none();
@@ -1487,7 +1522,7 @@ public class LeafQueue extends AbstractCSQueue {
   private Resource assignContainer(Resource clusterResource, FiCaSchedulerNode node, 
       FiCaSchedulerApp application, Priority priority, 
       ResourceRequest request, NodeType type, RMContainer rmContainer,
-      boolean needToUnreserve) {
+      boolean needToUnreserve, MutableObject createdContainer) {
     if (LOG.isDebugEnabled()) {
       LOG.debug("assignContainers: node=" + node.getNodeName()
         + " application=" + application.getApplicationId()
@@ -1592,7 +1627,7 @@ public class LeafQueue extends AbstractCSQueue {
           " container=" + container + 
           " queue=" + this + 
           " clusterResource=" + clusterResource);
-
+      createdContainer.setValue(allocatedContainer);
       return container.getResource();
     } else {
       // if we are allowed to allocate but this node doesn't have space, reserve it or
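
The MutableObject threaded through the assign* methods above is a plain
commons-lang value holder used as an out-parameter, letting assignContainer
return the assigned Resource while also reporting whether a container object
was actually created. The pattern in miniature (method and values
illustrative, not from the patch):

    import org.apache.commons.lang.mutable.MutableObject;

    static int divide(int a, int b, MutableObject remainder) {
      remainder.setValue(a % b);  // second result via the holder
      return a / b;               // primary result via the return value
    }

    // usage: q == 2 and rem.getValue() is Integer 1
    MutableObject rem = new MutableObject();
    int q = divide(7, 3, rem);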

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4006739a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
index 62ad8df..45df93e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
@@ -204,18 +204,55 @@ public class AppBlock extends HtmlBlock {
     table._();
     div._();
 
+    createContainerLocalityTable(html, attemptMetrics);
     createResourceRequestsTable(html, app);
   }
 
+  private void createContainerLocalityTable(Block html,
+      RMAppAttemptMetrics attemptMetrics) {
+    if (attemptMetrics == null) {
+      return;
+    }
+
+    DIV<Hamlet> div = html.div(_INFO_WRAP);
+    TABLE<DIV<Hamlet>> table =
+        div.h3(
+          "Total Allocated Containers: "
+              + attemptMetrics.getTotalAllocatedContainers()).h3("Each table cell"
+            + " represents the number of NodeLocal/RackLocal/OffSwitch containers"
+            + " satisfied by NodeLocal/RackLocal/OffSwitch resource requests.").table(
+          "#containerLocality");
+    table.
+      tr().
+        th(_TH, "").
+        th(_TH, "Node Local Request").
+        th(_TH, "Rack Local Request").
+        th(_TH, "Off Switch Request").
+      _();
+
+    String[] containersType =
+        { "Num Node Local Containers (satisfied by)", "Num Rack Local Containers (satisfied by)",
+            "Num Off Switch Containers (satisfied by)" };
+    boolean odd = false;
+    for (int i = 0; i < attemptMetrics.getLocalityStatistics().length; i++) {
+      table.tr((odd = !odd) ? _ODD : _EVEN).td(containersType[i])
+        .td(String.valueOf(attemptMetrics.getLocalityStatistics()[i][0]))
+        .td(i == 0 ? "" : String.valueOf(attemptMetrics.getLocalityStatistics()[i][1]))
+        .td(i <= 1 ? "" : String.valueOf(attemptMetrics.getLocalityStatistics()[i][2]))._();
+    }
+    table._();
+    div._();
+  }
+
   private void createResourceRequestsTable(Block html, AppInfo app) {
     TBODY<TABLE<Hamlet>> tbody =
         html.table("#ResourceRequests").thead().tr()
           .th(".priority", "Priority")
-          .th(".resourceName", "ResourceName")
+          .th(".resourceName", "Resource Name")
           .th(".totalResource", "Capability")
-          .th(".numContainers", "NumContainers")
-          .th(".relaxLocality", "RelaxLocality")
-          .th(".nodeLabelExpression", "NodeLabelExpression")._()._().tbody();
+          .th(".numContainers", "Num Containers")
+          .th(".relaxLocality", "Relax Locality")
+          .th(".nodeLabelExpression", "Node Label Expression")._()._().tbody();
 
     Resource totalResource = Resource.newInstance(0, 0);
     if (app.getResourceRequests() != null) {

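For reference, the table rendered by createContainerLocalityTable above looks roughly like this (counts hypothetical). Blank cells mark combinations the scheduler never records, since an allocation is never counted as more local than the request it satisfied:

    Total Allocated Containers: 17
                                                Node Local  Rack Local  Off Switch
                                                  Request     Request     Request
    Num Node Local Containers (satisfied by)        12
    Num Rack Local Containers (satisfied by)         3           1
    Num Off Switch Containers (satisfied by)         0           0           1
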
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4006739a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
index 4c6b25f..b3250e5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter;
 import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
@@ -218,6 +219,7 @@ public class TestReservations {
         .getMockApplicationAttemptId(0, 0);
     FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a,
         mock(ActiveUsersManager.class), spyRMContext);
+    rmContext.getRMApps().put(app_0.getApplicationId(), mock(RMApp.class));
 
     a.submitApplicationAttempt(app_0, user_0); 
 
@@ -373,6 +375,7 @@ public class TestReservations {
         .getMockApplicationAttemptId(0, 0);
     FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a,
         mock(ActiveUsersManager.class), spyRMContext);
+    rmContext.getRMApps().put(app_0.getApplicationId(), mock(RMApp.class));
 
     a.submitApplicationAttempt(app_0, user_0); 
 
@@ -524,6 +527,7 @@ public class TestReservations {
         .getMockApplicationAttemptId(0, 0);
     FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a,
         mock(ActiveUsersManager.class), spyRMContext);
+    rmContext.getRMApps().put(app_0.getApplicationId(), mock(RMApp.class));
 
     a.submitApplicationAttempt(app_0, user_0); 
 
@@ -765,6 +769,7 @@ public class TestReservations {
         .getMockApplicationAttemptId(0, 0);
     FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a,
         mock(ActiveUsersManager.class), spyRMContext);
+    rmContext.getRMApps().put(app_0.getApplicationId(), mock(RMApp.class));
 
     a.submitApplicationAttempt(app_0, user_0); 
 
@@ -943,7 +948,7 @@ public class TestReservations {
         .getMockApplicationAttemptId(0, 0);
     FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a,
         mock(ActiveUsersManager.class), spyRMContext);
-
+    rmContext.getRMApps().put(app_0.getApplicationId(), mock(RMApp.class));
     a.submitApplicationAttempt(app_0, user_0); 
 
     final ApplicationAttemptId appAttemptId_1 = TestUtils
@@ -1073,6 +1078,7 @@ public class TestReservations {
         .getMockApplicationAttemptId(0, 0);
     FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a,
         mock(ActiveUsersManager.class), spyRMContext);
+    rmContext.getRMApps().put(app_0.getApplicationId(), mock(RMApp.class));
 
     a.submitApplicationAttempt(app_0, user_0); 
 

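A note on the repeated one-line change above: the allocator now resolves the application through the RMContext to record locality statistics, so a test app without a registered RMApp would fail that lookup. The required setup, as a minimal sketch:

    // Register a mock RMApp under the app's id so the scheduler's new
    // locality-statistics path can resolve it during allocation.
    rmContext.getRMApps().put(app_0.getApplicationId(), mock(RMApp.class));
    a.submitApplicationAttempt(app_0, user_0);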

[11/50] [abbrv] hadoop git commit: MAPREDUCE-6248. Exposed the internal MapReduce job's information as a public API in DistCp. Contributed by Jing Zhao.

Posted by ji...@apache.org.
MAPREDUCE-6248. Exposed the internal MapReduce job's information as a public API in DistCp. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bab6209c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bab6209c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bab6209c

Branch: refs/heads/HDFS-7285
Commit: bab6209c170d1127680f8d0e975e2e54e9c63ccc
Parents: ff1b358
Author: Vinod Kumar Vavilapalli <vi...@apache.org>
Authored: Tue Mar 3 16:28:22 2015 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:23 2015 -0700

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt            |  3 ++
 .../java/org/apache/hadoop/tools/DistCp.java    | 47 +++++++++++++++-----
 2 files changed, 39 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bab6209c/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 7a2eff3..b2ae9d9 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -320,6 +320,9 @@ Release 2.7.0 - UNRELEASED
     MAPREDUCE-5612. Add javadoc for TaskCompletionEvent.Status.
     (Chris Palmer via aajisaka)
 
+    MAPREDUCE-6248. Exposed the internal MapReduce job's information as a public
+    API in DistCp. (Jing Zhao via vinodkv)
+
   OPTIMIZATIONS
 
     MAPREDUCE-6169. MergeQueue should release reference to the current item 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bab6209c/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
index 28535a7..b80aeb8 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.tools;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
@@ -51,12 +53,14 @@ import com.google.common.annotations.VisibleForTesting;
  * launch the copy-job. DistCp may alternatively be sub-classed to fine-tune
  * behaviour.
  */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
 public class DistCp extends Configured implements Tool {
 
   /**
-   * Priority of the ResourceManager shutdown hook.
+   * Priority of the shutdown hook.
    */
-  public static final int SHUTDOWN_HOOK_PRIORITY = 30;
+  static final int SHUTDOWN_HOOK_PRIORITY = 30;
 
   private static final Log LOG = LogFactory.getLog(DistCp.class);
 
@@ -66,7 +70,7 @@ public class DistCp extends Configured implements Tool {
   private static final String PREFIX = "_distcp";
   private static final String WIP_PREFIX = "._WIP_";
   private static final String DISTCP_DEFAULT_XML = "distcp-default.xml";
-  public static final Random rand = new Random();
+  static final Random rand = new Random();
 
   private boolean submitted;
   private FileSystem jobFS;
@@ -90,7 +94,7 @@ public class DistCp extends Configured implements Tool {
    * To be used with the ToolRunner. Not for public consumption.
    */
   @VisibleForTesting
-  public DistCp() {}
+  DistCp() {}
 
   /**
    * Implementation of Tool::run(). Orchestrates the copy of source file(s)
@@ -100,6 +104,7 @@ public class DistCp extends Configured implements Tool {
    * @param argv List of arguments passed to DistCp, from the ToolRunner.
    * @return On success, it returns 0. Else, -1.
    */
+  @Override
   public int run(String[] argv) {
     if (argv.length < 1) {
       OptionsParser.usage();
@@ -145,9 +150,21 @@ public class DistCp extends Configured implements Tool {
    * @throws Exception
    */
   public Job execute() throws Exception {
+    Job job = createAndSubmitJob();
+
+    if (inputOptions.shouldBlock()) {
+      waitForJobCompletion(job);
+    }
+    return job;
+  }
+
+  /**
+   * Create and submit the mapreduce job.
+   * @return The mapreduce job object that has been submitted
+   */
+  public Job createAndSubmitJob() throws Exception {
     assert inputOptions != null;
     assert getConf() != null;
-
     Job job = null;
     try {
       synchronized(this) {
@@ -169,16 +186,24 @@ public class DistCp extends Configured implements Tool {
 
     String jobID = job.getJobID().toString();
     job.getConfiguration().set(DistCpConstants.CONF_LABEL_DISTCP_JOB_ID, jobID);
-    
     LOG.info("DistCp job-id: " + jobID);
-    if (inputOptions.shouldBlock() && !job.waitForCompletion(true)) {
-      throw new IOException("DistCp failure: Job " + jobID + " has failed: "
-          + job.getStatus().getFailureInfo());
-    }
+
     return job;
   }
 
   /**
+   * Wait for the given job to complete.
+   * @param job the given mapreduce job that has already been submitted
+   */
+  public void waitForJobCompletion(Job job) throws Exception {
+    assert job != null;
+    if (!job.waitForCompletion(true)) {
+      throw new IOException("DistCp failure: Job " + job.getJobID()
+          + " has failed: " + job.getStatus().getFailureInfo());
+    }
+  }
+
+  /**
    * Set targetPathExists in both inputOptions and job config,
    * for the benefit of CopyCommitter
    */
@@ -436,7 +461,7 @@ public class DistCp extends Configured implements Tool {
   private static class Cleanup implements Runnable {
     private final DistCp distCp;
 
-    public Cleanup(DistCp distCp) {
+    Cleanup(DistCp distCp) {
       this.distCp = distCp;
     }
 

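A sketch of how a client might drive the now-public two-step API (paths hypothetical; note the no-arg constructor becomes package-private with this change, so callers use DistCp(Configuration, DistCpOptions), which throws Exception):

    import java.util.Arrays;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.tools.DistCp;
    import org.apache.hadoop.tools.DistCpOptions;

    DistCpOptions options = new DistCpOptions(
        Arrays.asList(new Path("hdfs://nn1/src")), new Path("hdfs://nn2/dst"));
    DistCp distCp = new DistCp(new Configuration(), options);

    // Submit without blocking, watch progress through the Job handle,
    // then wait; waitForJobCompletion throws IOException if the job fails.
    Job job = distCp.createAndSubmitJob();
    System.out.println("DistCp job " + job.getJobID() + " submitted");
    distCp.waitForJobCompletion(job);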

[46/50] [abbrv] hadoop git commit: Revert "HDFS-7857. Improve authentication failure WARN message to avoid user confusion. Contributed by Yongjun Zhang."

Posted by ji...@apache.org.
Revert "HDFS-7857. Improve authentication failure WARN message to avoid user confusion. Contributed by Yongjun Zhang."

This reverts commit d799fbe1ccf8752c44f087e34b5f400591d3b5bd.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/38b921a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/38b921a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/38b921a8

Branch: refs/heads/HDFS-7285
Commit: 38b921a88feeffb4e12070ed866cb0f171c36f8c
Parents: 129f88a
Author: Yongjun Zhang <yz...@cloudera.com>
Authored: Sun Mar 8 20:54:43 2015 -0700
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:17:55 2015 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/ipc/Server.java         | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/38b921a8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index d2d61b3..893e0eb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -1324,15 +1324,10 @@ public abstract class Server {
           saslResponse = processSaslMessage(saslMessage);
         } catch (IOException e) {
           rpcMetrics.incrAuthenticationFailures();
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(StringUtils.stringifyException(e));
-          }
           // attempting user could be null
-          IOException tce = (IOException) getTrueCause(e);
           AUDITLOG.warn(AUTH_FAILED_FOR + this.toString() + ":"
-              + attemptingUser + " (" + e.getLocalizedMessage()
-              + ") with true cause: (" + tce.getLocalizedMessage() + ")");
-          throw tce;
+              + attemptingUser + " (" + e.getLocalizedMessage() + ")");
+          throw (IOException) getTrueCause(e);
         }
         
         if (saslServer != null && saslServer.isComplete()) {


[41/50] [abbrv] hadoop git commit: HADOOP-11642. Upgrade azure sdk version from 0.6.0 to 2.0.0. Contributed by Shashank Khandelwal and Ivan Mitic.

Posted by ji...@apache.org.
HADOOP-11642. Upgrade azure sdk version from 0.6.0 to 2.0.0. Contributed by Shashank Khandelwal and Ivan Mitic.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fd633373
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fd633373
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fd633373

Branch: refs/heads/HDFS-7285
Commit: fd63337314557e9c8078e0e30ce7e43a05698594
Parents: a2f91d9
Author: cnauroth <cn...@apache.org>
Authored: Fri Mar 6 14:59:09 2015 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:27 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 hadoop-project/pom.xml                          |  6 +-
 hadoop-tools/hadoop-azure/pom.xml               |  7 +-
 .../fs/azure/AzureNativeFileSystemStore.java    | 37 ++++++-----
 .../hadoop/fs/azure/NativeAzureFileSystem.java  | 10 +--
 .../hadoop/fs/azure/PageBlobFormatHelpers.java  |  2 +-
 .../hadoop/fs/azure/PageBlobInputStream.java    |  8 +--
 .../hadoop/fs/azure/PageBlobOutputStream.java   |  8 +--
 .../hadoop/fs/azure/SelfRenewingLease.java      |  6 +-
 .../fs/azure/SelfThrottlingIntercept.java       | 10 +--
 .../hadoop/fs/azure/SendRequestIntercept.java   | 16 +++--
 .../hadoop/fs/azure/StorageInterface.java       | 24 +++----
 .../hadoop/fs/azure/StorageInterfaceImpl.java   | 46 +++++++------
 .../fs/azure/metrics/ErrorMetricUpdater.java    |  8 +--
 .../metrics/ResponseReceivedMetricUpdater.java  | 10 +--
 .../fs/azure/AzureBlobStorageTestAccount.java   | 28 ++++----
 .../hadoop/fs/azure/MockStorageInterface.java   | 70 ++++++++++++++------
 .../fs/azure/NativeAzureFileSystemBaseTest.java |  6 +-
 .../TestAzureFileSystemErrorConditions.java     |  6 +-
 .../hadoop/fs/azure/TestBlobDataValidation.java | 20 +++---
 .../hadoop/fs/azure/TestContainerChecks.java    |  6 +-
 .../TestOutOfBandAzureBlobOperationsLive.java   |  4 +-
 .../fs/azure/TestWasbUriAndConfiguration.java   |  4 +-
 23 files changed, 190 insertions(+), 155 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd633373/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 628faa3..14cd75a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -653,6 +653,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11648. Set DomainSocketWatcher thread name explicitly.
     (Liang Xie via ozawa)
 
+    HADOOP-11642. Upgrade azure sdk version from 0.6.0 to 2.0.0.
+    (Shashank Khandelwal and Ivan Mitic via cnauroth)
+
   OPTIMIZATIONS
 
     HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd633373/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 2c0f03a..a6127c7 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -902,9 +902,9 @@
       </dependency>
 
       <dependency>
-        <groupId>com.microsoft.windowsazure.storage</groupId>
-        <artifactId>microsoft-windowsazure-storage-sdk</artifactId>
-        <version>0.6.0</version>
+        <groupId>com.microsoft.azure</groupId>
+        <artifactId>azure-storage</artifactId>
+        <version>2.0.0</version>
      </dependency>
 
      <dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd633373/hadoop-tools/hadoop-azure/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/pom.xml b/hadoop-tools/hadoop-azure/pom.xml
index d39dd76..e9b3af7 100644
--- a/hadoop-tools/hadoop-azure/pom.xml
+++ b/hadoop-tools/hadoop-azure/pom.xml
@@ -140,12 +140,13 @@
       <artifactId>httpclient</artifactId>
       <scope>compile</scope>
     </dependency>
-
+    
     <dependency>
-      <groupId>com.microsoft.windowsazure.storage</groupId>
-      <artifactId>microsoft-windowsazure-storage-sdk</artifactId>
+      <groupId>com.microsoft.azure</groupId>
+      <artifactId>azure-storage</artifactId>
       <scope>compile</scope>
     </dependency>
+    
 
     <dependency>
       <groupId>com.google.guava</groupId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd633373/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index c0c03b3..b664fe7 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -65,23 +65,23 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.mortbay.util.ajax.JSON;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.microsoft.windowsazure.storage.CloudStorageAccount;
-import com.microsoft.windowsazure.storage.OperationContext;
-import com.microsoft.windowsazure.storage.RetryExponentialRetry;
-import com.microsoft.windowsazure.storage.RetryNoRetry;
-import com.microsoft.windowsazure.storage.StorageCredentials;
-import com.microsoft.windowsazure.storage.StorageCredentialsAccountAndKey;
-import com.microsoft.windowsazure.storage.StorageCredentialsSharedAccessSignature;
-import com.microsoft.windowsazure.storage.StorageErrorCode;
-import com.microsoft.windowsazure.storage.StorageException;
-import com.microsoft.windowsazure.storage.blob.BlobListingDetails;
-import com.microsoft.windowsazure.storage.blob.BlobProperties;
-import com.microsoft.windowsazure.storage.blob.BlobRequestOptions;
-import com.microsoft.windowsazure.storage.blob.CloudBlob;
-import com.microsoft.windowsazure.storage.blob.CopyStatus;
-import com.microsoft.windowsazure.storage.blob.DeleteSnapshotsOption;
-import com.microsoft.windowsazure.storage.blob.ListBlobItem;
-import com.microsoft.windowsazure.storage.core.Utility;
+import com.microsoft.azure.storage.CloudStorageAccount;
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.RetryExponentialRetry;
+import com.microsoft.azure.storage.RetryNoRetry;
+import com.microsoft.azure.storage.StorageCredentials;
+import com.microsoft.azure.storage.StorageCredentialsAccountAndKey;
+import com.microsoft.azure.storage.StorageCredentialsSharedAccessSignature;
+import com.microsoft.azure.storage.StorageErrorCode;
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.blob.BlobListingDetails;
+import com.microsoft.azure.storage.blob.BlobProperties;
+import com.microsoft.azure.storage.blob.BlobRequestOptions;
+import com.microsoft.azure.storage.blob.CloudBlob;
+import com.microsoft.azure.storage.blob.CopyStatus;
+import com.microsoft.azure.storage.blob.DeleteSnapshotsOption;
+import com.microsoft.azure.storage.blob.ListBlobItem;
+import com.microsoft.azure.storage.core.Utility;
 
 /**
  * Core implementation of Windows Azure Filesystem for Hadoop.
@@ -2543,7 +2543,8 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
     try {
       checkContainer(ContainerAccessType.ReadThenWrite);
       CloudBlobWrapper blob = getBlobReference(key);
-      blob.getProperties().setLastModified(lastModified);
+      // setLastModified is not available in SDK version 2.0.0;
+      // blob.uploadProperties() automatically updates the last-modified timestamp to the current time.
       blob.uploadProperties(getInstrumentedContext(), folderLease);
     } catch (Exception e) {
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd633373/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index 0248b85..e39b37d 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -74,11 +74,11 @@ import org.codehaus.jackson.map.JsonMappingException;
 import org.codehaus.jackson.map.ObjectMapper;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.microsoft.windowsazure.storage.AccessCondition;
-import com.microsoft.windowsazure.storage.OperationContext;
-import com.microsoft.windowsazure.storage.StorageException;
-import com.microsoft.windowsazure.storage.blob.CloudBlob;
-import com.microsoft.windowsazure.storage.core.*;
+import com.microsoft.azure.storage.AccessCondition;
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.blob.CloudBlob;
+import com.microsoft.azure.storage.core.*;
 
 /**
  * <p>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd633373/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobFormatHelpers.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobFormatHelpers.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobFormatHelpers.java
index ad11aac..9a316a5 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobFormatHelpers.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobFormatHelpers.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.fs.azure;
 
 import java.nio.ByteBuffer;
 
-import com.microsoft.windowsazure.storage.blob.BlobRequestOptions;
+import com.microsoft.azure.storage.blob.BlobRequestOptions;
 
 /**
  * Constants and helper methods for ASV's custom data format in page blobs.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd633373/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
index 62b47ee..468ac65 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
@@ -33,10 +33,10 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.azure.StorageInterface.CloudPageBlobWrapper;
 
-import com.microsoft.windowsazure.storage.OperationContext;
-import com.microsoft.windowsazure.storage.StorageException;
-import com.microsoft.windowsazure.storage.blob.BlobRequestOptions;
-import com.microsoft.windowsazure.storage.blob.PageRange;
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.blob.BlobRequestOptions;
+import com.microsoft.azure.storage.blob.PageRange;
 
 /**
  * An input stream that reads file data from a page blob stored

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd633373/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java
index 4d1d5c8..2b8846c 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java
@@ -44,10 +44,10 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.microsoft.windowsazure.storage.OperationContext;
-import com.microsoft.windowsazure.storage.StorageException;
-import com.microsoft.windowsazure.storage.blob.BlobRequestOptions;
-import com.microsoft.windowsazure.storage.blob.CloudPageBlob;
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.blob.BlobRequestOptions;
+import com.microsoft.azure.storage.blob.CloudPageBlob;
 
 
 /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd633373/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
index bda6006..06f32ce 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
@@ -22,9 +22,9 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.azure.StorageInterface.CloudBlobWrapper;
 
-import com.microsoft.windowsazure.storage.AccessCondition;
-import com.microsoft.windowsazure.storage.StorageException;
-import com.microsoft.windowsazure.storage.blob.CloudBlob;
+import com.microsoft.azure.storage.AccessCondition;
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.blob.CloudBlob;
 
 import java.util.concurrent.atomic.AtomicInteger;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd633373/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfThrottlingIntercept.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfThrottlingIntercept.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfThrottlingIntercept.java
index d18a144..a9e3df9 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfThrottlingIntercept.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfThrottlingIntercept.java
@@ -25,11 +25,11 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 
-import com.microsoft.windowsazure.storage.OperationContext;
-import com.microsoft.windowsazure.storage.RequestResult;
-import com.microsoft.windowsazure.storage.ResponseReceivedEvent;
-import com.microsoft.windowsazure.storage.SendingRequestEvent;
-import com.microsoft.windowsazure.storage.StorageEvent;
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.RequestResult;
+import com.microsoft.azure.storage.ResponseReceivedEvent;
+import com.microsoft.azure.storage.SendingRequestEvent;
+import com.microsoft.azure.storage.StorageEvent;
 
 /*
  * Self throttling is implemented by hooking into send & response callbacks 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd633373/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SendRequestIntercept.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SendRequestIntercept.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SendRequestIntercept.java
index 18f173e..4d564d5 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SendRequestIntercept.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SendRequestIntercept.java
@@ -25,12 +25,13 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 
-import com.microsoft.windowsazure.storage.Constants.HeaderConstants;
-import com.microsoft.windowsazure.storage.OperationContext;
-import com.microsoft.windowsazure.storage.SendingRequestEvent;
-import com.microsoft.windowsazure.storage.StorageCredentials;
-import com.microsoft.windowsazure.storage.StorageEvent;
-import com.microsoft.windowsazure.storage.StorageException;
+import com.microsoft.azure.storage.Constants.HeaderConstants;
+import com.microsoft.azure.storage.core.StorageCredentialsHelper;
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.SendingRequestEvent;
+import com.microsoft.azure.storage.StorageCredentials;
+import com.microsoft.azure.storage.StorageEvent;
+import com.microsoft.azure.storage.StorageException;
 
 /**
  * Manages the lifetime of binding on the operation contexts to intercept send
@@ -146,7 +147,8 @@ public final class SendRequestIntercept extends StorageEvent<SendingRequestEvent
       try {
         // Sign the request. GET's have no payload so the content length is
         // zero.
-        getCredentials().signBlobAndQueueRequest(urlConnection, -1L, getOperationContext());
+        StorageCredentialsHelper.signBlobAndQueueRequest(getCredentials(),
+          urlConnection, -1L, getOperationContext());
       } catch (InvalidKeyException e) {
         // Log invalid key exception to track signing error before the send
         // fails.

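The change above reflects an API move in azure-storage 2.0.0: request signing is no longer an instance method on StorageCredentials but a static helper. Roughly, with urlConnection and the operation context as in the surrounding code:

    // SDK 0.6.0 (instance method):
    //   getCredentials().signBlobAndQueueRequest(urlConnection, -1L, getOperationContext());
    // SDK 2.0.0 (static helper; credentials passed explicitly):
    StorageCredentialsHelper.signBlobAndQueueRequest(getCredentials(),
        urlConnection, -1L, getOperationContext());
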
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd633373/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java
index 8d0229d..91928a2 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java
@@ -29,18 +29,18 @@ import java.util.HashMap;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 
-import com.microsoft.windowsazure.storage.CloudStorageAccount;
-import com.microsoft.windowsazure.storage.OperationContext;
-import com.microsoft.windowsazure.storage.RetryPolicyFactory;
-import com.microsoft.windowsazure.storage.StorageCredentials;
-import com.microsoft.windowsazure.storage.StorageException;
-import com.microsoft.windowsazure.storage.blob.BlobListingDetails;
-import com.microsoft.windowsazure.storage.blob.BlobProperties;
-import com.microsoft.windowsazure.storage.blob.BlobRequestOptions;
-import com.microsoft.windowsazure.storage.blob.CloudBlob;
-import com.microsoft.windowsazure.storage.blob.CopyState;
-import com.microsoft.windowsazure.storage.blob.ListBlobItem;
-import com.microsoft.windowsazure.storage.blob.PageRange;
+import com.microsoft.azure.storage.CloudStorageAccount;
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.RetryPolicyFactory;
+import com.microsoft.azure.storage.StorageCredentials;
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.blob.BlobListingDetails;
+import com.microsoft.azure.storage.blob.BlobProperties;
+import com.microsoft.azure.storage.blob.BlobRequestOptions;
+import com.microsoft.azure.storage.blob.CloudBlob;
+import com.microsoft.azure.storage.blob.CopyState;
+import com.microsoft.azure.storage.blob.ListBlobItem;
+import com.microsoft.azure.storage.blob.PageRange;
 
 /**
  * This is a very thin layer over the methods exposed by the Windows Azure

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd633373/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java
index e44823c..2120536 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java
@@ -30,26 +30,26 @@ import java.util.Iterator;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 
-import com.microsoft.windowsazure.storage.AccessCondition;
-import com.microsoft.windowsazure.storage.CloudStorageAccount;
-import com.microsoft.windowsazure.storage.OperationContext;
-import com.microsoft.windowsazure.storage.RetryPolicyFactory;
-import com.microsoft.windowsazure.storage.StorageCredentials;
-import com.microsoft.windowsazure.storage.StorageException;
-import com.microsoft.windowsazure.storage.StorageUri;
-import com.microsoft.windowsazure.storage.blob.BlobListingDetails;
-import com.microsoft.windowsazure.storage.blob.BlobProperties;
-import com.microsoft.windowsazure.storage.blob.BlobRequestOptions;
-import com.microsoft.windowsazure.storage.blob.CloudBlob;
-import com.microsoft.windowsazure.storage.blob.CloudBlobClient;
-import com.microsoft.windowsazure.storage.blob.CloudBlobContainer;
-import com.microsoft.windowsazure.storage.blob.CloudBlobDirectory;
-import com.microsoft.windowsazure.storage.blob.CloudBlockBlob;
-import com.microsoft.windowsazure.storage.blob.CloudPageBlob;
-import com.microsoft.windowsazure.storage.blob.CopyState;
-import com.microsoft.windowsazure.storage.blob.DeleteSnapshotsOption;
-import com.microsoft.windowsazure.storage.blob.ListBlobItem;
-import com.microsoft.windowsazure.storage.blob.PageRange;
+import com.microsoft.azure.storage.AccessCondition;
+import com.microsoft.azure.storage.CloudStorageAccount;
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.RetryPolicyFactory;
+import com.microsoft.azure.storage.StorageCredentials;
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.StorageUri;
+import com.microsoft.azure.storage.blob.BlobListingDetails;
+import com.microsoft.azure.storage.blob.BlobProperties;
+import com.microsoft.azure.storage.blob.BlobRequestOptions;
+import com.microsoft.azure.storage.blob.CloudBlob;
+import com.microsoft.azure.storage.blob.CloudBlobClient;
+import com.microsoft.azure.storage.blob.CloudBlobContainer;
+import com.microsoft.azure.storage.blob.CloudBlobDirectory;
+import com.microsoft.azure.storage.blob.CloudBlockBlob;
+import com.microsoft.azure.storage.blob.CloudPageBlob;
+import com.microsoft.azure.storage.blob.CopyState;
+import com.microsoft.azure.storage.blob.DeleteSnapshotsOption;
+import com.microsoft.azure.storage.blob.ListBlobItem;
+import com.microsoft.azure.storage.blob.PageRange;
 
 /**
  * A real implementation of the Azure interaction layer that just redirects
@@ -61,12 +61,14 @@ class StorageInterfaceImpl extends StorageInterface {
 
   @Override
   public void setRetryPolicyFactory(final RetryPolicyFactory retryPolicyFactory) {
-    serviceClient.setRetryPolicyFactory(retryPolicyFactory);
+    serviceClient.getDefaultRequestOptions().setRetryPolicyFactory(
+            retryPolicyFactory);
   }
 
   @Override
   public void setTimeoutInMs(int timeoutInMs) {
-    serviceClient.setTimeoutInMs(timeoutInMs);
+    serviceClient.getDefaultRequestOptions().setTimeoutIntervalInMs(
+            timeoutInMs);
   }
 
   @Override

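Likewise, per-client tuning moved onto a default RequestOptions object in 2.0.0. A minimal sketch with both knobs set together (retry and timeout values hypothetical; serviceClient is an existing CloudBlobClient):

    import com.microsoft.azure.storage.RetryExponentialRetry;
    import com.microsoft.azure.storage.blob.CloudBlobClient;

    // 0.6.0: serviceClient.setRetryPolicyFactory(...) and setTimeoutInMs(...)
    // 2.0.0: both settings live on the client's default request options.
    serviceClient.getDefaultRequestOptions().setRetryPolicyFactory(
        new RetryExponentialRetry(3 * 1000, 3));
    serviceClient.getDefaultRequestOptions().setTimeoutIntervalInMs(30 * 1000);
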
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd633373/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/ErrorMetricUpdater.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/ErrorMetricUpdater.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/ErrorMetricUpdater.java
index d33e8c4..dc23354 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/ErrorMetricUpdater.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/ErrorMetricUpdater.java
@@ -24,10 +24,10 @@ import static java.net.HttpURLConnection.HTTP_INTERNAL_ERROR; //500
 
 import org.apache.hadoop.classification.InterfaceAudience;
 
-import com.microsoft.windowsazure.storage.OperationContext;
-import com.microsoft.windowsazure.storage.RequestResult;
-import com.microsoft.windowsazure.storage.ResponseReceivedEvent;
-import com.microsoft.windowsazure.storage.StorageEvent;
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.RequestResult;
+import com.microsoft.azure.storage.ResponseReceivedEvent;
+import com.microsoft.azure.storage.StorageEvent;
 
 
 /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd633373/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/ResponseReceivedMetricUpdater.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/ResponseReceivedMetricUpdater.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/ResponseReceivedMetricUpdater.java
index 676adb9..de503bf 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/ResponseReceivedMetricUpdater.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/ResponseReceivedMetricUpdater.java
@@ -24,11 +24,11 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 
-import com.microsoft.windowsazure.storage.Constants.HeaderConstants;
-import com.microsoft.windowsazure.storage.OperationContext;
-import com.microsoft.windowsazure.storage.RequestResult;
-import com.microsoft.windowsazure.storage.ResponseReceivedEvent;
-import com.microsoft.windowsazure.storage.StorageEvent;
+import com.microsoft.azure.storage.Constants.HeaderConstants;
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.RequestResult;
+import com.microsoft.azure.storage.ResponseReceivedEvent;
+import com.microsoft.azure.storage.StorageEvent;
 
 
 /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd633373/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
index b8ff912..635c024 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
@@ -40,20 +40,20 @@ import org.apache.hadoop.metrics2.MetricsSink;
 import org.apache.hadoop.metrics2.MetricsTag;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 
-import com.microsoft.windowsazure.storage.AccessCondition;
-import com.microsoft.windowsazure.storage.CloudStorageAccount;
-import com.microsoft.windowsazure.storage.StorageCredentials;
-import com.microsoft.windowsazure.storage.StorageCredentialsAccountAndKey;
-import com.microsoft.windowsazure.storage.StorageCredentialsAnonymous;
-import com.microsoft.windowsazure.storage.blob.BlobContainerPermissions;
-import com.microsoft.windowsazure.storage.blob.BlobContainerPublicAccessType;
-import com.microsoft.windowsazure.storage.blob.BlobOutputStream;
-import com.microsoft.windowsazure.storage.blob.CloudBlobClient;
-import com.microsoft.windowsazure.storage.blob.CloudBlobContainer;
-import com.microsoft.windowsazure.storage.blob.CloudBlockBlob;
-import com.microsoft.windowsazure.storage.blob.SharedAccessBlobPermissions;
-import com.microsoft.windowsazure.storage.blob.SharedAccessBlobPolicy;
-import com.microsoft.windowsazure.storage.core.Base64;
+import com.microsoft.azure.storage.AccessCondition;
+import com.microsoft.azure.storage.CloudStorageAccount;
+import com.microsoft.azure.storage.StorageCredentials;
+import com.microsoft.azure.storage.StorageCredentialsAccountAndKey;
+import com.microsoft.azure.storage.StorageCredentialsAnonymous;
+import com.microsoft.azure.storage.blob.BlobContainerPermissions;
+import com.microsoft.azure.storage.blob.BlobContainerPublicAccessType;
+import com.microsoft.azure.storage.blob.BlobOutputStream;
+import com.microsoft.azure.storage.blob.CloudBlobClient;
+import com.microsoft.azure.storage.blob.CloudBlobContainer;
+import com.microsoft.azure.storage.blob.CloudBlockBlob;
+import com.microsoft.azure.storage.blob.SharedAccessBlobPermissions;
+import com.microsoft.azure.storage.blob.SharedAccessBlobPolicy;
+import com.microsoft.azure.storage.core.Base64;
 
 /**
  * Helper class to create WASB file systems backed by either a mock in-memory

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd633373/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java
index 047ea1b..c51c05b 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java
@@ -22,10 +22,12 @@ import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.lang.reflect.Method;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
 import java.util.Calendar;
+import java.util.Date;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -35,21 +37,21 @@ import org.apache.commons.httpclient.URIException;
 import org.apache.commons.httpclient.util.URIUtil;
 import org.apache.commons.lang.NotImplementedException;
 
-import com.microsoft.windowsazure.storage.CloudStorageAccount;
-import com.microsoft.windowsazure.storage.OperationContext;
-import com.microsoft.windowsazure.storage.RetryPolicyFactory;
-import com.microsoft.windowsazure.storage.StorageCredentials;
-import com.microsoft.windowsazure.storage.StorageException;
-import com.microsoft.windowsazure.storage.StorageUri;
-import com.microsoft.windowsazure.storage.blob.BlobListingDetails;
-import com.microsoft.windowsazure.storage.blob.BlobProperties;
-import com.microsoft.windowsazure.storage.blob.BlobRequestOptions;
-import com.microsoft.windowsazure.storage.blob.CloudBlob;
-import com.microsoft.windowsazure.storage.blob.CloudBlobContainer;
-import com.microsoft.windowsazure.storage.blob.CloudBlobDirectory;
-import com.microsoft.windowsazure.storage.blob.CopyState;
-import com.microsoft.windowsazure.storage.blob.ListBlobItem;
-import com.microsoft.windowsazure.storage.blob.PageRange;
+import com.microsoft.azure.storage.CloudStorageAccount;
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.RetryPolicyFactory;
+import com.microsoft.azure.storage.StorageCredentials;
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.StorageUri;
+import com.microsoft.azure.storage.blob.BlobListingDetails;
+import com.microsoft.azure.storage.blob.BlobProperties;
+import com.microsoft.azure.storage.blob.BlobRequestOptions;
+import com.microsoft.azure.storage.blob.CloudBlob;
+import com.microsoft.azure.storage.blob.CloudBlobContainer;
+import com.microsoft.azure.storage.blob.CloudBlobDirectory;
+import com.microsoft.azure.storage.blob.CopyState;
+import com.microsoft.azure.storage.blob.ListBlobItem;
+import com.microsoft.azure.storage.blob.PageRange;
 
 import javax.ws.rs.core.UriBuilder;
 import javax.ws.rs.core.UriBuilderException;
@@ -357,18 +359,42 @@ public class MockStorageInterface extends StorageInterface {
       this.uri = uri;
       this.metadata = metadata;
       this.properties = new BlobProperties();
-      this.properties.setLength(length);
-      this.properties.setLastModified(
-          Calendar.getInstance(TimeZone.getTimeZone("UTC")).getTime());
+
+      this.properties = updateLastModified(this.properties);
+      this.properties = updateLength(this.properties, length);
+    }
+
+    protected BlobProperties updateLastModified(BlobProperties properties) {
+      try {
+        // BlobProperties.setLastModified is not public in SDK 2.0.0,
+        // so the mock sets it reflectively.
+        Method setLastModified = properties.getClass()
+            .getDeclaredMethod("setLastModified", Date.class);
+        setLastModified.setAccessible(true);
+        setLastModified.invoke(properties,
+            Calendar.getInstance(TimeZone.getTimeZone("UTC")).getTime());
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+      return properties;
     }
-
+
+    protected BlobProperties updateLength(BlobProperties properties, int length) {
+      try {
+        // Same reflective workaround for the non-public setLength.
+        Method setLength = properties.getClass()
+            .getDeclaredMethod("setLength", long.class);
+        setLength.setAccessible(true);
+        setLength.invoke(properties, (long) length);
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+      return properties;
+    }
+    
     protected void refreshProperties(boolean getMetadata) {
       if (backingStore.exists(convertUriToDecodedString(uri))) {
         byte[] content = backingStore.getContent(convertUriToDecodedString(uri));
         properties = new BlobProperties();
-        properties.setLength(content.length);
-        properties.setLastModified(
-            Calendar.getInstance(TimeZone.getTimeZone("UTC")).getTime());
+        this.properties = updateLastModified(this.properties);
+        this.properties = updateLength(this.properties, content.length);
         if (getMetadata) {
           metadata = backingStore.getMetadata(convertUriToDecodedString(uri));
         }

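The two reflective setters above are needed because BlobProperties.setLength and setLastModified are no longer public in SDK 2.0.0. A hypothetical generic helper expressing the same workaround once:

    import java.lang.reflect.Method;

    static void setByReflection(Object target, String setter,
        Class<?> paramType, Object value) {
      try {
        // Reach a non-public setter on the SDK type and invoke it.
        Method m = target.getClass().getDeclaredMethod(setter, paramType);
        m.setAccessible(true);
        m.invoke(target, value);
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    }

    // e.g. setByReflection(properties, "setLength", long.class, (long) length);
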
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd633373/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java
index 01cf713..9ce6cc9 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java
@@ -58,9 +58,9 @@ import org.junit.Test;
 import org.apache.hadoop.fs.azure.AzureException;
 import org.apache.hadoop.fs.azure.NativeAzureFileSystem.FolderRenamePending;
 
-import com.microsoft.windowsazure.storage.AccessCondition;
-import com.microsoft.windowsazure.storage.StorageException;
-import com.microsoft.windowsazure.storage.blob.CloudBlob;
+import com.microsoft.azure.storage.AccessCondition;
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.blob.CloudBlob;
 
 /*
  * Tests the Native Azure file system (WASB) against an actual blob store if

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd633373/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java
index febb605..ace57dc 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java
@@ -37,9 +37,9 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.TestHookOperationContext;
 import org.junit.Test;
 
-import com.microsoft.windowsazure.storage.OperationContext;
-import com.microsoft.windowsazure.storage.SendingRequestEvent;
-import com.microsoft.windowsazure.storage.StorageEvent;
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.SendingRequestEvent;
+import com.microsoft.azure.storage.StorageEvent;
 
 public class TestAzureFileSystemErrorConditions {
   private static final int ALL_THREE_FILE_SIZE = 1024;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd633373/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobDataValidation.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobDataValidation.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobDataValidation.java
index 25bd338..9237ade 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobDataValidation.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobDataValidation.java
@@ -39,16 +39,16 @@ import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.TestHookOperationCo
 import org.junit.After;
 import org.junit.Test;
 
-import com.microsoft.windowsazure.storage.Constants;
-import com.microsoft.windowsazure.storage.OperationContext;
-import com.microsoft.windowsazure.storage.ResponseReceivedEvent;
-import com.microsoft.windowsazure.storage.StorageErrorCodeStrings;
-import com.microsoft.windowsazure.storage.StorageEvent;
-import com.microsoft.windowsazure.storage.StorageException;
-import com.microsoft.windowsazure.storage.blob.BlockEntry;
-import com.microsoft.windowsazure.storage.blob.BlockSearchMode;
-import com.microsoft.windowsazure.storage.blob.CloudBlockBlob;
-import com.microsoft.windowsazure.storage.core.Base64;
+import com.microsoft.azure.storage.Constants;
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.ResponseReceivedEvent;
+import com.microsoft.azure.storage.StorageErrorCodeStrings;
+import com.microsoft.azure.storage.StorageEvent;
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.blob.BlockEntry;
+import com.microsoft.azure.storage.blob.BlockSearchMode;
+import com.microsoft.azure.storage.blob.CloudBlockBlob;
+import com.microsoft.azure.storage.core.Base64;
 
 /**
  * Test that we do proper data integrity validation with MD5 checks as

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd633373/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestContainerChecks.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestContainerChecks.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestContainerChecks.java
index 727f540..56ec881 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestContainerChecks.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestContainerChecks.java
@@ -32,9 +32,9 @@ import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount.CreateOptions;
 import org.junit.After;
 import org.junit.Test;
 
-import com.microsoft.windowsazure.storage.blob.BlobOutputStream;
-import com.microsoft.windowsazure.storage.blob.CloudBlobContainer;
-import com.microsoft.windowsazure.storage.blob.CloudBlockBlob;
+import com.microsoft.azure.storage.blob.BlobOutputStream;
+import com.microsoft.azure.storage.blob.CloudBlobContainer;
+import com.microsoft.azure.storage.blob.CloudBlockBlob;
 
 /**
  * Tests that WASB creates containers only if needed.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd633373/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperationsLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperationsLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperationsLive.java
index 9ac67e7..60b01c6 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperationsLive.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperationsLive.java
@@ -29,8 +29,8 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
-import com.microsoft.windowsazure.storage.blob.BlobOutputStream;
-import com.microsoft.windowsazure.storage.blob.CloudBlockBlob;
+import com.microsoft.azure.storage.blob.BlobOutputStream;
+import com.microsoft.azure.storage.blob.CloudBlockBlob;
 
 public class TestOutOfBandAzureBlobOperationsLive {
   private FileSystem fs;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd633373/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java
index 0360e32..a4ca6fd 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java
@@ -42,8 +42,8 @@ import org.junit.After;
 import org.junit.Assert;
 import org.junit.Test;
 
-import com.microsoft.windowsazure.storage.blob.CloudBlobContainer;
-import com.microsoft.windowsazure.storage.blob.CloudBlockBlob;
+import com.microsoft.azure.storage.blob.CloudBlobContainer;
+import com.microsoft.azure.storage.blob.CloudBlockBlob;
 
 public class TestWasbUriAndConfiguration {
 


[05/50] [abbrv] hadoop git commit: MAPREDUCE-5583. Ability to limit running map and reduce tasks. Contributed by Jason Lowe.

Posted by ji...@apache.org.
MAPREDUCE-5583. Ability to limit running map and reduce tasks. Contributed by Jason Lowe.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68c9b55e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68c9b55e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68c9b55e

Branch: refs/heads/HDFS-7285
Commit: 68c9b55e9d3ff5959b750502724d9c3db23171c1
Parents: 4a3ef07
Author: Junping Du <ju...@apache.org>
Authored: Tue Mar 3 02:01:04 2015 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:22 2015 -0700

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt            |   3 +
 .../v2/app/rm/RMContainerAllocator.java         |  65 +++++-
 .../v2/app/rm/RMContainerRequestor.java         |  74 ++++++-
 .../v2/app/rm/TestRMContainerAllocator.java     | 214 +++++++++++++++++++
 .../apache/hadoop/mapreduce/MRJobConfig.java    |   8 +
 .../src/main/resources/mapred-default.xml       |  16 ++
 6 files changed, 363 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c9b55e/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 5524b14..7a2eff3 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -258,6 +258,9 @@ Release 2.7.0 - UNRELEASED
 
     MAPREDUCE-6228. Add truncate operation to SLive. (Plamen Jeliazkov via shv)
 
+    MAPREDUCE-5583. Ability to limit running map and reduce tasks. 
+    (Jason Lowe via junping_du)
+
   IMPROVEMENTS
 
     MAPREDUCE-6149. Document override log4j.properties in MR job.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c9b55e/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
index 1acfeec..efea674 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
@@ -99,9 +99,9 @@ public class RMContainerAllocator extends RMContainerRequestor
   public static final 
   float DEFAULT_COMPLETED_MAPS_PERCENT_FOR_REDUCE_SLOWSTART = 0.05f;
   
-  private static final Priority PRIORITY_FAST_FAIL_MAP;
-  private static final Priority PRIORITY_REDUCE;
-  private static final Priority PRIORITY_MAP;
+  static final Priority PRIORITY_FAST_FAIL_MAP;
+  static final Priority PRIORITY_REDUCE;
+  static final Priority PRIORITY_MAP;
 
   @VisibleForTesting
   public static final String RAMPDOWN_DIAGNOSTIC = "Reducer preempted "
@@ -166,6 +166,8 @@ public class RMContainerAllocator extends RMContainerRequestor
    */
   private long allocationDelayThresholdMs = 0;
   private float reduceSlowStart = 0;
+  private int maxRunningMaps = 0;
+  private int maxRunningReduces = 0;
   private long retryInterval;
   private long retrystartTime;
   private Clock clock;
@@ -201,6 +203,10 @@ public class RMContainerAllocator extends RMContainerRequestor
     allocationDelayThresholdMs = conf.getInt(
         MRJobConfig.MR_JOB_REDUCER_PREEMPT_DELAY_SEC,
         MRJobConfig.DEFAULT_MR_JOB_REDUCER_PREEMPT_DELAY_SEC) * 1000;//sec -> ms
+    maxRunningMaps = conf.getInt(MRJobConfig.JOB_RUNNING_MAP_LIMIT,
+        MRJobConfig.DEFAULT_JOB_RUNNING_MAP_LIMIT);
+    maxRunningReduces = conf.getInt(MRJobConfig.JOB_RUNNING_REDUCE_LIMIT,
+        MRJobConfig.DEFAULT_JOB_RUNNING_REDUCE_LIMIT);
     RackResolver.init(conf);
     retryInterval = getConfig().getLong(MRJobConfig.MR_AM_TO_RM_WAIT_INTERVAL_MS,
                                 MRJobConfig.DEFAULT_MR_AM_TO_RM_WAIT_INTERVAL_MS);
@@ -664,6 +670,8 @@ public class RMContainerAllocator extends RMContainerRequestor
   
   @SuppressWarnings("unchecked")
   private List<Container> getResources() throws Exception {
+    applyConcurrentTaskLimits();
+
     // will be null the first time
     Resource headRoom =
         getAvailableResources() == null ? Resources.none() :
@@ -778,6 +786,43 @@ public class RMContainerAllocator extends RMContainerRequestor
     return newContainers;
   }
 
+  private void applyConcurrentTaskLimits() {
+    int numScheduledMaps = scheduledRequests.maps.size();
+    if (maxRunningMaps > 0 && numScheduledMaps > 0) {
+      int maxRequestedMaps = Math.max(0,
+          maxRunningMaps - assignedRequests.maps.size());
+      int numScheduledFailMaps = scheduledRequests.earlierFailedMaps.size();
+      int failedMapRequestLimit = Math.min(maxRequestedMaps,
+          numScheduledFailMaps);
+      int normalMapRequestLimit = Math.min(
+          maxRequestedMaps - failedMapRequestLimit,
+          numScheduledMaps - numScheduledFailMaps);
+      setRequestLimit(PRIORITY_FAST_FAIL_MAP, mapResourceRequest,
+          failedMapRequestLimit);
+      setRequestLimit(PRIORITY_MAP, mapResourceRequest, normalMapRequestLimit);
+    }
+
+    int numScheduledReduces = scheduledRequests.reduces.size();
+    if (maxRunningReduces > 0 && numScheduledReduces > 0) {
+      int maxRequestedReduces = Math.max(0,
+          maxRunningReduces - assignedRequests.reduces.size());
+      int reduceRequestLimit = Math.min(maxRequestedReduces,
+          numScheduledReduces);
+      setRequestLimit(PRIORITY_REDUCE, reduceResourceRequest,
+          reduceRequestLimit);
+    }
+  }
+
+  private boolean canAssignMaps() {
+    return (maxRunningMaps <= 0
+        || assignedRequests.maps.size() < maxRunningMaps);
+  }
+
+  private boolean canAssignReduces() {
+    return (maxRunningReduces <= 0
+        || assignedRequests.reduces.size() < maxRunningReduces);
+  }
+
   private void updateAMRMToken(Token token) throws IOException {
     org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> amrmToken =
         new org.apache.hadoop.security.token.Token<AMRMTokenIdentifier>(token
@@ -1046,8 +1091,7 @@ public class RMContainerAllocator extends RMContainerRequestor
       it = allocatedContainers.iterator();
       while (it.hasNext()) {
         Container allocated = it.next();
-        LOG.info("Releasing unassigned and invalid container " 
-            + allocated + ". RM may have assignment issues");
+        LOG.info("Releasing unassigned container " + allocated);
         containerNotAssigned(allocated);
       }
     }
@@ -1150,7 +1194,8 @@ public class RMContainerAllocator extends RMContainerRequestor
     private ContainerRequest assignToFailedMap(Container allocated) {
       //try to assign to earlierFailedMaps if present
       ContainerRequest assigned = null;
-      while (assigned == null && earlierFailedMaps.size() > 0) {
+      while (assigned == null && earlierFailedMaps.size() > 0
+          && canAssignMaps()) {
         TaskAttemptId tId = earlierFailedMaps.removeFirst();      
         if (maps.containsKey(tId)) {
           assigned = maps.remove(tId);
@@ -1168,7 +1213,7 @@ public class RMContainerAllocator extends RMContainerRequestor
     private ContainerRequest assignToReduce(Container allocated) {
       ContainerRequest assigned = null;
       //try to assign to reduces if present
-      if (assigned == null && reduces.size() > 0) {
+      if (assigned == null && reduces.size() > 0 && canAssignReduces()) {
         TaskAttemptId tId = reduces.keySet().iterator().next();
         assigned = reduces.remove(tId);
         LOG.info("Assigned to reduce");
@@ -1180,7 +1225,7 @@ public class RMContainerAllocator extends RMContainerRequestor
     private void assignMapsWithLocality(List<Container> allocatedContainers) {
       // try to assign to all nodes first to match node local
       Iterator<Container> it = allocatedContainers.iterator();
-      while(it.hasNext() && maps.size() > 0){
+      while(it.hasNext() && maps.size() > 0 && canAssignMaps()){
         Container allocated = it.next();        
         Priority priority = allocated.getPriority();
         assert PRIORITY_MAP.equals(priority);
@@ -1212,7 +1257,7 @@ public class RMContainerAllocator extends RMContainerRequestor
       
       // try to match all rack local
       it = allocatedContainers.iterator();
-      while(it.hasNext() && maps.size() > 0){
+      while(it.hasNext() && maps.size() > 0 && canAssignMaps()){
         Container allocated = it.next();
         Priority priority = allocated.getPriority();
         assert PRIORITY_MAP.equals(priority);
@@ -1242,7 +1287,7 @@ public class RMContainerAllocator extends RMContainerRequestor
       
       // assign remaining
       it = allocatedContainers.iterator();
-      while(it.hasNext() && maps.size() > 0){
+      while(it.hasNext() && maps.size() > 0 && canAssignMaps()){
         Container allocated = it.next();
         Priority priority = allocated.getPriority();
         assert PRIORITY_MAP.equals(priority);

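For readers tracing the new applyConcurrentTaskLimits() logic above, the
ask-limit arithmetic can be worked through with hypothetical numbers (the
values below are illustrative only, not taken from the patch):

    // Minimal standalone sketch of the applyConcurrentTaskLimits() math,
    // assuming a map limit of 10, 4 maps already running, and 8 scheduled
    // maps of which 2 are earlier-failed maps.
    public class TaskLimitSketch {
      public static void main(String[] args) {
        int maxRunningMaps = 10;      // mapreduce.job.running.map.limit
        int assignedMaps = 4;         // assignedRequests.maps.size()
        int numScheduledMaps = 8;     // scheduledRequests.maps.size()
        int numScheduledFailMaps = 2; // scheduledRequests.earlierFailedMaps.size()

        // Headroom still available under the concurrency limit.
        int maxRequestedMaps = Math.max(0, maxRunningMaps - assignedMaps);  // 6
        // Earlier-failed maps are granted ask slots first...
        int failedMapRequestLimit = Math.min(maxRequestedMaps,
            numScheduledFailMaps);                                          // 2
        // ...and whatever headroom remains caps the normal map ask.
        int normalMapRequestLimit = Math.min(
            maxRequestedMaps - failedMapRequestLimit,
            numScheduledMaps - numScheduledFailMaps);                       // 4

        System.out.println("fast-fail ask limit = " + failedMapRequestLimit);
        System.out.println("normal map ask limit = " + normalMapRequestLimit);
      }
    }
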
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c9b55e/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
index bb9ad02..1666864 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
@@ -44,6 +45,7 @@ import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.ResourceRequest.ResourceRequestComparator;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
@@ -58,6 +60,8 @@ import com.google.common.annotations.VisibleForTesting;
 public abstract class RMContainerRequestor extends RMCommunicator {
   
   private static final Log LOG = LogFactory.getLog(RMContainerRequestor.class);
+  private static final ResourceRequestComparator RESOURCE_REQUEST_COMPARATOR =
+      new ResourceRequestComparator();
 
   protected int lastResponseID;
   private Resource availableResources;
@@ -77,12 +81,18 @@ public abstract class RMContainerRequestor extends RMCommunicator {
   // use custom comparator to make sure ResourceRequest objects differing only in 
   // numContainers dont end up as duplicates
   private final Set<ResourceRequest> ask = new TreeSet<ResourceRequest>(
-      new org.apache.hadoop.yarn.api.records.ResourceRequest.ResourceRequestComparator());
+      RESOURCE_REQUEST_COMPARATOR);
   private final Set<ContainerId> release = new TreeSet<ContainerId>();
   // pendingRelease holds history or release requests.request is removed only if
   // RM sends completedContainer.
   // How it different from release? --> release is for per allocate() request.
   protected Set<ContainerId> pendingRelease = new TreeSet<ContainerId>();
+
+  private final Map<ResourceRequest,ResourceRequest> requestLimits =
+      new TreeMap<ResourceRequest,ResourceRequest>(RESOURCE_REQUEST_COMPARATOR);
+  private final Set<ResourceRequest> requestLimitsToUpdate =
+      new TreeSet<ResourceRequest>(RESOURCE_REQUEST_COMPARATOR);
+
   private boolean nodeBlacklistingEnabled;
   private int blacklistDisablePercent;
   private AtomicBoolean ignoreBlacklisting = new AtomicBoolean(false);
@@ -178,6 +188,7 @@ public abstract class RMContainerRequestor extends RMCommunicator {
 
   protected AllocateResponse makeRemoteRequest() throws YarnException,
       IOException {
+    applyRequestLimits();
     ResourceBlacklistRequest blacklistRequest =
         ResourceBlacklistRequest.newInstance(new ArrayList<String>(blacklistAdditions),
             new ArrayList<String>(blacklistRemovals));
@@ -190,13 +201,14 @@ public abstract class RMContainerRequestor extends RMCommunicator {
     availableResources = allocateResponse.getAvailableResources();
     lastClusterNmCount = clusterNmCount;
     clusterNmCount = allocateResponse.getNumClusterNodes();
+    int numCompletedContainers =
+        allocateResponse.getCompletedContainersStatuses().size();
 
     if (ask.size() > 0 || release.size() > 0) {
       LOG.info("getResources() for " + applicationId + ":" + " ask="
           + ask.size() + " release= " + release.size() + " newContainers="
           + allocateResponse.getAllocatedContainers().size()
-          + " finishedContainers="
-          + allocateResponse.getCompletedContainersStatuses().size()
+          + " finishedContainers=" + numCompletedContainers
           + " resourcelimit=" + availableResources + " knownNMs="
           + clusterNmCount);
     }
@@ -204,6 +216,12 @@ public abstract class RMContainerRequestor extends RMCommunicator {
     ask.clear();
     release.clear();
 
+    if (numCompletedContainers > 0) {
+      // re-send limited requests when a container completes to trigger asking
+      // for more containers
+      requestLimitsToUpdate.addAll(requestLimits.keySet());
+    }
+
     if (blacklistAdditions.size() > 0 || blacklistRemovals.size() > 0) {
       LOG.info("Update the blacklist for " + applicationId +
           ": blacklistAdditions=" + blacklistAdditions.size() +
@@ -214,6 +232,36 @@ public abstract class RMContainerRequestor extends RMCommunicator {
     return allocateResponse;
   }
 
+  private void applyRequestLimits() {
+    Iterator<ResourceRequest> iter = requestLimits.values().iterator();
+    while (iter.hasNext()) {
+      ResourceRequest reqLimit = iter.next();
+      int limit = reqLimit.getNumContainers();
+      Map<String, Map<Resource, ResourceRequest>> remoteRequests =
+          remoteRequestsTable.get(reqLimit.getPriority());
+      Map<Resource, ResourceRequest> reqMap = (remoteRequests != null)
+          ? remoteRequests.get(ResourceRequest.ANY) : null;
+      ResourceRequest req = (reqMap != null)
+          ? reqMap.get(reqLimit.getCapability()) : null;
+      if (req == null) {
+        continue;
+      }
+      // update an existing ask or send a new one if updating
+      if (ask.remove(req) || requestLimitsToUpdate.contains(req)) {
+        ResourceRequest newReq = req.getNumContainers() > limit
+            ? reqLimit : req;
+        ask.add(newReq);
+        LOG.info("Applying ask limit of " + newReq.getNumContainers()
+            + " for priority:" + reqLimit.getPriority()
+            + " and capability:" + reqLimit.getCapability());
+      }
+      if (limit == Integer.MAX_VALUE) {
+        iter.remove();
+      }
+    }
+    requestLimitsToUpdate.clear();
+  }
+
   protected void addOutstandingRequestOnResync() {
     for (Map<String, Map<Resource, ResourceRequest>> rr : remoteRequestsTable
         .values()) {
@@ -229,6 +277,7 @@ public abstract class RMContainerRequestor extends RMCommunicator {
     if (!pendingRelease.isEmpty()) {
       release.addAll(pendingRelease);
     }
+    requestLimitsToUpdate.addAll(requestLimits.keySet());
   }
 
   // May be incorrect if there's multiple NodeManagers running on a single host.
@@ -459,10 +508,8 @@ public abstract class RMContainerRequestor extends RMCommunicator {
   private void addResourceRequestToAsk(ResourceRequest remoteRequest) {
     // because objects inside the resource map can be deleted ask can end up 
     // containing an object that matches new resource object but with different
-    // numContainers. So exisintg values must be replaced explicitly
-    if(ask.contains(remoteRequest)) {
-      ask.remove(remoteRequest);
-    }
+    // numContainers. So existing values must be replaced explicitly
+    ask.remove(remoteRequest);
     ask.add(remoteRequest);    
   }
 
@@ -490,6 +537,19 @@ public abstract class RMContainerRequestor extends RMCommunicator {
     return newReq;
   }
   
+  protected void setRequestLimit(Priority priority, Resource capability,
+      int limit) {
+    if (limit < 0) {
+      limit = Integer.MAX_VALUE;
+    }
+    ResourceRequest newReqLimit = ResourceRequest.newInstance(priority,
+        ResourceRequest.ANY, capability, limit);
+    ResourceRequest oldReqLimit = requestLimits.put(newReqLimit, newReqLimit);
+    if (oldReqLimit == null || oldReqLimit.getNumContainers() < limit) {
+      requestLimitsToUpdate.add(newReqLimit);
+    }
+  }
+
   public Set<String> getBlacklistedNodes() {
     return blacklistedNodes;
   }

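The setRequestLimit()/applyRequestLimits() bookkeeping above has two
subtleties: a negative limit is normalized to "unlimited"
(Integer.MAX_VALUE), and a limit is only re-sent to the RM when it is
first set or raised (or when containers complete). A hypothetical
standalone sketch of that normalization, with plain String keys standing
in for ResourceRequest objects:

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    // Simplified model of the requestLimits/requestLimitsToUpdate pair;
    // not part of the patch, keys are illustrative only.
    public class RequestLimitSketch {
      static Map<String, Integer> requestLimits = new HashMap<>();
      static Set<String> requestLimitsToUpdate = new HashSet<>();

      static void setRequestLimit(String key, int limit) {
        if (limit < 0) {
          limit = Integer.MAX_VALUE; // negative means no limit
        }
        Integer old = requestLimits.put(key, limit);
        if (old == null || old < limit) {
          requestLimitsToUpdate.add(key); // re-send only on first set or raise
        }
      }

      public static void main(String[] args) {
        setRequestLimit("MAP", 3);           // first set -> marked for update
        requestLimitsToUpdate.clear();       // simulate a heartbeat sending it
        setRequestLimit("MAP", 2);           // lowered -> not marked again
        System.out.println(requestLimitsToUpdate); // []
        setRequestLimit("MAP", -1);          // negative -> unlimited, marked
        System.out.println(requestLimits);         // {MAP=2147483647}
        System.out.println(requestLimitsToUpdate); // [MAP]
      }
    }
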
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c9b55e/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
index 4759693..eca1a4d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
@@ -31,9 +31,11 @@ import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -81,7 +83,13 @@ import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
@@ -89,6 +97,10 @@ import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.NMToken;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -2387,6 +2399,208 @@ public class TestRMContainerAllocator {
         new Text(rmAddr), ugiToken.getService());
   }
 
+  @Test
+  public void testConcurrentTaskLimits() throws Exception {
+    final int MAP_LIMIT = 3;
+    final int REDUCE_LIMIT = 1;
+    LOG.info("Running testConcurrentTaskLimits");
+    Configuration conf = new Configuration();
+    conf.setInt(MRJobConfig.JOB_RUNNING_MAP_LIMIT, MAP_LIMIT);
+    conf.setInt(MRJobConfig.JOB_RUNNING_REDUCE_LIMIT, REDUCE_LIMIT);
+    conf.setFloat(MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART, 1.0f);
+    ApplicationId appId = ApplicationId.newInstance(1, 1);
+    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
+        appId, 1);
+    JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
+    Job mockJob = mock(Job.class);
+    when(mockJob.getReport()).thenReturn(
+        MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
+            0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
+    final MockScheduler mockScheduler = new MockScheduler(appAttemptId);
+    MyContainerAllocator allocator = new MyContainerAllocator(null, conf,
+        appAttemptId, mockJob) {
+          @Override
+          protected void register() {
+          }
+
+          @Override
+          protected ApplicationMasterProtocol createSchedulerProxy() {
+            return mockScheduler;
+          }
+    };
+
+    // create some map requests
+    ContainerRequestEvent[] reqMapEvents = new ContainerRequestEvent[5];
+    for (int i = 0; i < reqMapEvents.length; ++i) {
+      reqMapEvents[i] = createReq(jobId, i, 1024, new String[] { "h" + i });
+    }
+    allocator.sendRequests(Arrays.asList(reqMapEvents));
+
+    // create some reduce requests
+    ContainerRequestEvent[] reqReduceEvents = new ContainerRequestEvent[2];
+    for (int i = 0; i < reqReduceEvents.length; ++i) {
+      reqReduceEvents[i] = createReq(jobId, i, 1024, new String[] {},
+          false, true);
+    }
+    allocator.sendRequests(Arrays.asList(reqReduceEvents));
+    allocator.schedule();
+
+    // verify all of the host-specific asks were sent plus one for the
+    // default rack and one for the ANY request
+    Assert.assertEquals(reqMapEvents.length + 2, mockScheduler.lastAsk.size());
+
+    // verify AM is only asking for the map limit overall
+    Assert.assertEquals(MAP_LIMIT, mockScheduler.lastAnyAskMap);
+
+    // assign a map task and verify we do not ask for any more maps
+    ContainerId cid0 = mockScheduler.assignContainer("h0", false);
+    allocator.schedule();
+    allocator.schedule();
+    Assert.assertEquals(2, mockScheduler.lastAnyAskMap);
+
+    // complete the map task and verify that we ask for one more
+    mockScheduler.completeContainer(cid0);
+    allocator.schedule();
+    allocator.schedule();
+    Assert.assertEquals(3, mockScheduler.lastAnyAskMap);
+
+    // assign three more maps and verify we ask for no more maps
+    ContainerId cid1 = mockScheduler.assignContainer("h1", false);
+    ContainerId cid2 = mockScheduler.assignContainer("h2", false);
+    ContainerId cid3 = mockScheduler.assignContainer("h3", false);
+    allocator.schedule();
+    allocator.schedule();
+    Assert.assertEquals(0, mockScheduler.lastAnyAskMap);
+
+    // complete two containers and verify we only asked for one more
+    // since at that point all maps should be scheduled/completed
+    mockScheduler.completeContainer(cid2);
+    mockScheduler.completeContainer(cid3);
+    allocator.schedule();
+    allocator.schedule();
+    Assert.assertEquals(1, mockScheduler.lastAnyAskMap);
+
+    // allocate the last container and complete the first one
+    // and verify there are no more map asks.
+    mockScheduler.completeContainer(cid1);
+    ContainerId cid4 = mockScheduler.assignContainer("h4", false);
+    allocator.schedule();
+    allocator.schedule();
+    Assert.assertEquals(0, mockScheduler.lastAnyAskMap);
+
+    // complete the last map
+    mockScheduler.completeContainer(cid4);
+    allocator.schedule();
+    allocator.schedule();
+    Assert.assertEquals(0, mockScheduler.lastAnyAskMap);
+
+    // verify only reduce limit being requested
+    Assert.assertEquals(REDUCE_LIMIT, mockScheduler.lastAnyAskReduce);
+
+    // assign a reducer and verify ask goes to zero
+    cid0 = mockScheduler.assignContainer("h0", true);
+    allocator.schedule();
+    allocator.schedule();
+    Assert.assertEquals(0, mockScheduler.lastAnyAskReduce);
+
+    // complete the reducer and verify we ask for another
+    mockScheduler.completeContainer(cid0);
+    allocator.schedule();
+    allocator.schedule();
+    Assert.assertEquals(1, mockScheduler.lastAnyAskReduce);
+
+    // assign a reducer and verify ask goes to zero
+    cid0 = mockScheduler.assignContainer("h0", true);
+    allocator.schedule();
+    allocator.schedule();
+    Assert.assertEquals(0, mockScheduler.lastAnyAskReduce);
+
+    // complete the reducer and verify no more reducers
+    mockScheduler.completeContainer(cid0);
+    allocator.schedule();
+    allocator.schedule();
+    Assert.assertEquals(0, mockScheduler.lastAnyAskReduce);
+    allocator.close();
+  }
+
+  private static class MockScheduler implements ApplicationMasterProtocol {
+    ApplicationAttemptId attemptId;
+    long nextContainerId = 10;
+    List<ResourceRequest> lastAsk = null;
+    int lastAnyAskMap = 0;
+    int lastAnyAskReduce = 0;
+    List<ContainerStatus> containersToComplete =
+        new ArrayList<ContainerStatus>();
+    List<Container> containersToAllocate = new ArrayList<Container>();
+
+    public MockScheduler(ApplicationAttemptId attemptId) {
+      this.attemptId = attemptId;
+    }
+
+    @Override
+    public RegisterApplicationMasterResponse registerApplicationMaster(
+        RegisterApplicationMasterRequest request) throws YarnException,
+        IOException {
+      return RegisterApplicationMasterResponse.newInstance(
+          Resource.newInstance(512, 1),
+          Resource.newInstance(512000, 1024),
+          Collections.<ApplicationAccessType,String>emptyMap(),
+          ByteBuffer.wrap("fake_key".getBytes()),
+          Collections.<Container>emptyList(),
+          "default",
+          Collections.<NMToken>emptyList());
+    }
+
+    @Override
+    public FinishApplicationMasterResponse finishApplicationMaster(
+        FinishApplicationMasterRequest request) throws YarnException,
+        IOException {
+      return FinishApplicationMasterResponse.newInstance(false);
+    }
+
+    @Override
+    public AllocateResponse allocate(AllocateRequest request)
+        throws YarnException, IOException {
+      lastAsk = request.getAskList();
+      for (ResourceRequest req : lastAsk) {
+        if (ResourceRequest.ANY.equals(req.getResourceName())) {
+          Priority priority = req.getPriority();
+          if (priority.equals(RMContainerAllocator.PRIORITY_MAP)) {
+            lastAnyAskMap = req.getNumContainers();
+          } else if (priority.equals(RMContainerAllocator.PRIORITY_REDUCE)){
+            lastAnyAskReduce = req.getNumContainers();
+          }
+        }
+      }
+      AllocateResponse response =  AllocateResponse.newInstance(
+          request.getResponseId(),
+          containersToComplete, containersToAllocate,
+          Collections.<NodeReport>emptyList(),
+          Resource.newInstance(512000, 1024), null, 10, null,
+          Collections.<NMToken>emptyList());
+      containersToComplete.clear();
+      containersToAllocate.clear();
+      return response;
+    }
+
+    public ContainerId assignContainer(String nodeName, boolean isReduce) {
+      ContainerId containerId =
+          ContainerId.newContainerId(attemptId, nextContainerId++);
+      Priority priority = isReduce ? RMContainerAllocator.PRIORITY_REDUCE
+          : RMContainerAllocator.PRIORITY_MAP;
+      Container container = Container.newInstance(containerId,
+          NodeId.newInstance(nodeName, 1234), nodeName + ":5678",
+        Resource.newInstance(1024, 1), priority, null);
+      containersToAllocate.add(container);
+      return containerId;
+    }
+
+    public void completeContainer(ContainerId containerId) {
+      containersToComplete.add(ContainerStatus.newInstance(containerId,
+          ContainerState.COMPLETE, "", 0));
+    }
+  }
+
   public static void main(String[] args) throws Exception {
     TestRMContainerAllocator t = new TestRMContainerAllocator();
     t.testSimple();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c9b55e/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
index d06b075..5527103 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
@@ -373,6 +373,14 @@ public interface MRJobConfig {
 
   public static final String DEFAULT_JOB_ACL_MODIFY_JOB = " ";
   
+  public static final String JOB_RUNNING_MAP_LIMIT =
+      "mapreduce.job.running.map.limit";
+  public static final int DEFAULT_JOB_RUNNING_MAP_LIMIT = 0;
+
+  public static final String JOB_RUNNING_REDUCE_LIMIT =
+      "mapreduce.job.running.reduce.limit";
+  public static final int DEFAULT_JOB_RUNNING_REDUCE_LIMIT = 0;
+
   /* config for tracking the local file where all the credentials for the job
    * credentials.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c9b55e/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index 6e80679..d864756 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -83,6 +83,22 @@
 </property>
 
 <property>
+  <name>mapreduce.job.running.map.limit</name>
+  <value>0</value>
+  <description>The maximum number of simultaneous map tasks per job.
+  There is no limit if this value is 0 or negative.
+  </description>
+</property>
+
+<property>
+  <name>mapreduce.job.running.reduce.limit</name>
+  <value>0</value>
+  <description>The maximum number of simultaneous reduce tasks per job.
+  There is no limit if this value is 0 or negative.
+  </description>
+</property>
+
+<property>
   <name>mapreduce.job.reducer.preempt.delay.sec</name>
   <value>0</value>
   <description>The threshold in terms of seconds after which an unsatisfied mapper 

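These two properties are the user-facing switch for the feature; a minimal
sketch of setting them from a job driver (hypothetical class and job name,
otherwise using only the MRJobConfig constants added above):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.MRJobConfig;

    // Hypothetical driver snippet: cap a job at 20 concurrent map tasks
    // and 5 concurrent reduce tasks. A value of 0 or a negative value
    // leaves that task type unlimited.
    public class LimitedJobDriver {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.setInt(MRJobConfig.JOB_RUNNING_MAP_LIMIT, 20);    // mapreduce.job.running.map.limit
        conf.setInt(MRJobConfig.JOB_RUNNING_REDUCE_LIMIT, 5);  // mapreduce.job.running.reduce.limit
        Job job = Job.getInstance(conf, "rate-limited job");
        // ... set mapper/reducer/input/output as usual, then submit ...
      }
    }
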

[18/50] [abbrv] hadoop git commit: YARN-3122. Metrics for container's actual CPU usage. (Anubhav Dhoot via kasha)

Posted by ji...@apache.org.
YARN-3122. Metrics for container's actual CPU usage. (Anubhav Dhoot via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/97adb9aa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/97adb9aa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/97adb9aa

Branch: refs/heads/HDFS-7285
Commit: 97adb9aa39ef01e5d38039044cf90b351fd21c30
Parents: e93eee9
Author: Karthik Kambatla <ka...@apache.org>
Authored: Wed Mar 4 17:33:30 2015 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:24 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 +
 .../apache/hadoop/yarn/util/CpuTimeTracker.java | 99 ++++++++++++++++++++
 .../util/LinuxResourceCalculatorPlugin.java     | 46 +++------
 .../yarn/util/ProcfsBasedProcessTree.java       | 77 ++++++++++++++-
 .../util/ResourceCalculatorProcessTree.java     | 12 ++-
 .../yarn/util/WindowsBasedProcessTree.java      |  7 +-
 .../util/TestLinuxResourceCalculatorPlugin.java |  4 +-
 .../yarn/util/TestProcfsBasedProcessTree.java   | 38 ++++++--
 .../util/TestResourceCalculatorProcessTree.java |  5 +
 .../monitor/ContainerMetrics.java               | 39 ++++++--
 .../monitor/ContainersMonitorImpl.java          | 18 ++++
 .../util/NodeManagerHardwareUtils.java          | 16 +++-
 12 files changed, 311 insertions(+), 53 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/97adb9aa/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 03bb20b..0b71bee 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -351,6 +351,9 @@ Release 2.7.0 - UNRELEASED
     YARN-3272. Surface container locality info in RM web UI.
     (Jian He via wangda)
 
+    YARN-3122. Metrics for container's actual CPU usage. 
+    (Anubhav Dhoot via kasha)
+
   OPTIMIZATIONS
 
     YARN-2990. FairScheduler's delay-scheduling always waits for node-local and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/97adb9aa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/CpuTimeTracker.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/CpuTimeTracker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/CpuTimeTracker.java
new file mode 100644
index 0000000..d36848e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/CpuTimeTracker.java
@@ -0,0 +1,99 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.math.BigInteger;
+
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class CpuTimeTracker {
+  public static final int UNAVAILABLE = -1;
+  final long MINIMUM_UPDATE_INTERVAL;
+
+  // CPU used time since system is on (ms)
+  BigInteger cumulativeCpuTime = BigInteger.ZERO;
+
+  // CPU used time read last time (ms)
+  BigInteger lastCumulativeCpuTime = BigInteger.ZERO;
+
+  // Unix timestamp while reading the CPU time (ms)
+  long sampleTime;
+  long lastSampleTime;
+  float cpuUsage;
+  BigInteger jiffyLengthInMillis;
+
+  public CpuTimeTracker(long jiffyLengthInMillis) {
+    this.jiffyLengthInMillis = BigInteger.valueOf(jiffyLengthInMillis);
+    this.cpuUsage = UNAVAILABLE;
+    this.sampleTime = UNAVAILABLE;
+    this.lastSampleTime = UNAVAILABLE;
+    MINIMUM_UPDATE_INTERVAL =  10 * jiffyLengthInMillis;
+  }
+
+  /**
+   * Return percentage of cpu time spent over the time since last update.
+   * CPU time spent is based on elapsed jiffies multiplied by amount of
+   * time for 1 core. Thus, if you use 2 cores completely you would have spent
+   * twice the actual time between updates and this will return 200%.
+   *
+   * @return Return percentage of cpu usage since last update, {@link
+   * CpuTimeTracker#UNAVAILABLE} if there haven't been 2 updates more than
+   * {@link CpuTimeTracker#MINIMUM_UPDATE_INTERVAL} apart
+   */
+  public float getCpuTrackerUsagePercent() {
+    if (lastSampleTime == UNAVAILABLE ||
+        lastSampleTime > sampleTime) {
+      // lastSampleTime > sampleTime may happen when the system time is changed
+      lastSampleTime = sampleTime;
+      lastCumulativeCpuTime = cumulativeCpuTime;
+      return cpuUsage;
+    }
+    // When lastSampleTime is sufficiently old, update cpuUsage.
+    // Also take a sample of the current time and cumulative CPU time for the
+    // use of the next calculation.
+    if (sampleTime > lastSampleTime + MINIMUM_UPDATE_INTERVAL) {
+      cpuUsage =
+          ((cumulativeCpuTime.subtract(lastCumulativeCpuTime)).floatValue())
+          * 100F / ((float) (sampleTime - lastSampleTime));
+      lastSampleTime = sampleTime;
+      lastCumulativeCpuTime = cumulativeCpuTime;
+    }
+    return cpuUsage;
+  }
+
+  public void updateElapsedJiffies(BigInteger elapedJiffies, long sampleTime) {
+    this.cumulativeCpuTime = elapedJiffies.multiply(jiffyLengthInMillis);
+    this.sampleTime = sampleTime;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("SampleTime " + this.sampleTime);
+    sb.append(" CummulativeCpuTime " + this.cumulativeCpuTime);
+    sb.append(" LastSampleTime " + this.lastSampleTime);
+    sb.append(" LastCummulativeCpuTime " + this.lastCumulativeCpuTime);
+    sb.append(" CpuUsage " + this.cpuUsage);
+    sb.append(" JiffyLengthMillisec " + this.jiffyLengthInMillis);
+    return sb.toString();
+  }
+}
\ No newline at end of file

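The tracker only reports a usage figure once it has two samples more than
MINIMUM_UPDATE_INTERVAL apart; a hypothetical usage sketch with a 10 ms
jiffy length, mirroring the numbers used in the updated
TestProcfsBasedProcessTree below:

    import java.math.BigInteger;
    import org.apache.hadoop.yarn.util.CpuTimeTracker;

    // Illustrative use of the new CpuTimeTracker: feed it cumulative
    // elapsed jiffies plus a sample timestamp, then ask for the usage
    // percentage. Values are hypothetical.
    public class CpuTrackerSketch {
      public static void main(String[] args) {
        long jiffyMillis = 10; // assume 100 Hz jiffies
        CpuTimeTracker tracker = new CpuTimeTracker(jiffyMillis);

        // First sample: 7200 jiffies used by time t = 0 ms.
        tracker.updateElapsedJiffies(BigInteger.valueOf(7200), 0);
        System.out.println(tracker.getCpuTrackerUsagePercent()); // -1.0 (one sample)

        // Second sample 200000 ms later: 9400 jiffies used.
        tracker.updateElapsedJiffies(BigInteger.valueOf(9400), 200000);
        // (9400 - 7200) jiffies * 10 ms * 100 / 200000 ms = 11.0
        System.out.println(tracker.getCpuTrackerUsagePercent());
      }
    }
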
http://git-wip-us.apache.org/repos/asf/hadoop/blob/97adb9aa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/LinuxResourceCalculatorPlugin.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/LinuxResourceCalculatorPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/LinuxResourceCalculatorPlugin.java
index 2347f40..ab1511a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/LinuxResourceCalculatorPlugin.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/LinuxResourceCalculatorPlugin.java
@@ -23,6 +23,7 @@ import java.io.FileInputStream;
 import java.io.FileNotFoundException;
 import java.io.InputStreamReader;
 import java.io.IOException;
+import java.math.BigInteger;
 import java.nio.charset.Charset;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -41,8 +42,6 @@ public class LinuxResourceCalculatorPlugin extends ResourceCalculatorPlugin {
   private static final Log LOG =
       LogFactory.getLog(LinuxResourceCalculatorPlugin.class);
 
-  public static final int UNAVAILABLE = -1;
-
   /**
    * proc's meminfo virtual file has keys-values in the format
    * "key:[ \t]*value[ \t]kB".
@@ -74,6 +73,7 @@ public class LinuxResourceCalculatorPlugin extends ResourceCalculatorPlugin {
   private static final Pattern CPU_TIME_FORMAT =
     Pattern.compile("^cpu[ \t]*([0-9]*)" +
     		            "[ \t]*([0-9]*)[ \t]*([0-9]*)[ \t].*");
+  private CpuTimeTracker cpuTimeTracker;
 
   private String procfsMemFile;
   private String procfsCpuFile;
@@ -87,12 +87,6 @@ public class LinuxResourceCalculatorPlugin extends ResourceCalculatorPlugin {
   private long inactiveSize = 0; // inactive cache memory (kB)
   private int numProcessors = 0; // number of processors on the system
   private long cpuFrequency = 0L; // CPU frequency on the system (kHz)
-  private long cumulativeCpuTime = 0L; // CPU used time since system is on (ms)
-  private long lastCumulativeCpuTime = 0L; // CPU used time read last time (ms)
-  // Unix timestamp while reading the CPU time (ms)
-  private float cpuUsage = UNAVAILABLE;
-  private long sampleTime = UNAVAILABLE;
-  private long lastSampleTime = UNAVAILABLE;
 
   boolean readMemInfoFile = false;
   boolean readCpuInfoFile = false;
@@ -106,10 +100,8 @@ public class LinuxResourceCalculatorPlugin extends ResourceCalculatorPlugin {
   }
 
   public LinuxResourceCalculatorPlugin() {
-    procfsMemFile = PROCFS_MEMFILE;
-    procfsCpuFile = PROCFS_CPUINFO;
-    procfsStatFile = PROCFS_STAT;
-    jiffyLengthInMillis = ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS;
+    this(PROCFS_MEMFILE, PROCFS_CPUINFO, PROCFS_STAT,
+        ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS);
   }
 
   /**
@@ -128,6 +120,7 @@ public class LinuxResourceCalculatorPlugin extends ResourceCalculatorPlugin {
     this.procfsCpuFile = procfsCpuFile;
     this.procfsStatFile = procfsStatFile;
     this.jiffyLengthInMillis = jiffyLengthInMillis;
+    this.cpuTimeTracker = new CpuTimeTracker(jiffyLengthInMillis);
   }
 
   /**
@@ -276,12 +269,13 @@ public class LinuxResourceCalculatorPlugin extends ResourceCalculatorPlugin {
           long uTime = Long.parseLong(mat.group(1));
           long nTime = Long.parseLong(mat.group(2));
           long sTime = Long.parseLong(mat.group(3));
-          cumulativeCpuTime = uTime + nTime + sTime; // milliseconds
+          cpuTimeTracker.updateElapsedJiffies(
+              BigInteger.valueOf(uTime + nTime + sTime),
+              getCurrentTime());
           break;
         }
         str = in.readLine();
       }
-      cumulativeCpuTime *= jiffyLengthInMillis;
     } catch (IOException io) {
       LOG.warn("Error reading the stream " + io);
     } finally {
@@ -345,32 +339,18 @@ public class LinuxResourceCalculatorPlugin extends ResourceCalculatorPlugin {
   @Override
   public long getCumulativeCpuTime() {
     readProcStatFile();
-    return cumulativeCpuTime;
+    return cpuTimeTracker.cumulativeCpuTime.longValue();
   }
 
   /** {@inheritDoc} */
   @Override
   public float getCpuUsage() {
     readProcStatFile();
-    sampleTime = getCurrentTime();
-    if (lastSampleTime == UNAVAILABLE ||
-        lastSampleTime > sampleTime) {
-      // lastSampleTime > sampleTime may happen when the system time is changed
-      lastSampleTime = sampleTime;
-      lastCumulativeCpuTime = cumulativeCpuTime;
-      return cpuUsage;
-    }
-    // When lastSampleTime is sufficiently old, update cpuUsage.
-    // Also take a sample of the current time and cumulative CPU time for the
-    // use of the next calculation.
-    final long MINIMUM_UPDATE_INTERVAL = 10 * jiffyLengthInMillis;
-    if (sampleTime > lastSampleTime + MINIMUM_UPDATE_INTERVAL) {
-	    cpuUsage = (float)(cumulativeCpuTime - lastCumulativeCpuTime) * 100F /
-	               ((float)(sampleTime - lastSampleTime) * getNumProcessors());
-	    lastSampleTime = sampleTime;
-      lastCumulativeCpuTime = cumulativeCpuTime;
+    float overallCpuUsage = cpuTimeTracker.getCpuTrackerUsagePercent();
+    if (overallCpuUsage != CpuTimeTracker.UNAVAILABLE) {
+      overallCpuUsage = overallCpuUsage / getNumProcessors();
     }
-    return cpuUsage;
+    return overallCpuUsage;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/97adb9aa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
index 69aa96d..134cec2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
@@ -66,6 +66,8 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
   public static final String PROCFS_CMDLINE_FILE = "cmdline";
   public static final long PAGE_SIZE;
   public static final long JIFFY_LENGTH_IN_MILLIS; // in millisecond
+  private final CpuTimeTracker cpuTimeTracker;
+  private Clock clock;
 
   enum MemInfo {
     SIZE("Size"), RSS("Rss"), PSS("Pss"), SHARED_CLEAN("Shared_Clean"),
@@ -144,7 +146,7 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
     new HashMap<String, ProcessInfo>();
 
   public ProcfsBasedProcessTree(String pid) {
-    this(pid, PROCFS);
+    this(pid, PROCFS, new SystemClock());
   }
 
   @Override
@@ -157,6 +159,10 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
     }
   }
 
+  public ProcfsBasedProcessTree(String pid, String procfsDir) {
+    this(pid, procfsDir, new SystemClock());
+  }
+
   /**
    * Build a new process tree rooted at the pid.
    *
@@ -165,11 +171,14 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
    *
    * @param pid root of the process tree
    * @param procfsDir the root of a proc file system - only used for testing.
+   * @param clock clock for controlling time for testing
    */
-  public ProcfsBasedProcessTree(String pid, String procfsDir) {
+  public ProcfsBasedProcessTree(String pid, String procfsDir, Clock clock) {
     super(pid);
+    this.clock = clock;
     this.pid = getValidPID(pid);
     this.procfsDir = procfsDir;
+    this.cpuTimeTracker = new CpuTimeTracker(JIFFY_LENGTH_IN_MILLIS);
   }
 
   /**
@@ -447,6 +456,26 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
     return cpuTime;
   }
 
+  private BigInteger getTotalProcessJiffies() {
+    BigInteger totalStime = BigInteger.ZERO;
+    long totalUtime = 0;
+    for (ProcessInfo p : processTree.values()) {
+      if (p != null) {
+        totalUtime += p.getUtime();
+        totalStime = totalStime.add(p.getStime());
+      }
+    }
+    return totalStime.add(BigInteger.valueOf(totalUtime));
+  }
+
+  @Override
+  public float getCpuUsagePercent() {
+    BigInteger processTotalJiffies = getTotalProcessJiffies();
+    cpuTimeTracker.updateElapsedJiffies(processTotalJiffies,
+        clock.getTime());
+    return cpuTimeTracker.getCpuTrackerUsagePercent();
+  }
+
   private static String getValidPID(String pid) {
     if (pid == null) return deadPid;
     Matcher m = numberPattern.matcher(pid);
@@ -962,4 +991,48 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
       return sb.toString();
     }
   }
+
+  /**
+   * Test the {@link ProcfsBasedProcessTree}
+   *
+   * @param args
+   */
+  public static void main(String[] args) {
+    if (args.length != 1) {
+      System.out.println("Provide <pid of process to monitor>");
+      return;
+    }
+
+    int numprocessors =
+        ResourceCalculatorPlugin.getResourceCalculatorPlugin(null, null)
+            .getNumProcessors();
+    System.out.println("Number of processors " + numprocessors);
+
+    System.out.println("Creating ProcfsBasedProcessTree for process " +
+        args[0]);
+    ProcfsBasedProcessTree procfsBasedProcessTree = new
+        ProcfsBasedProcessTree(args[0]);
+    procfsBasedProcessTree.updateProcessTree();
+
+    System.out.println(procfsBasedProcessTree.getProcessTreeDump());
+    System.out.println("Get cpu usage " + procfsBasedProcessTree
+        .getCpuUsagePercent());
+
+    try {
+      // Sleep so we can compute the CPU usage
+      Thread.sleep(500L);
+    } catch (InterruptedException e) {
+      // do nothing
+    }
+
+    procfsBasedProcessTree.updateProcessTree();
+
+    System.out.println(procfsBasedProcessTree.getProcessTreeDump());
+    System.out.println("Cpu usage  " + procfsBasedProcessTree
+        .getCpuUsagePercent());
+    System.out.println("Vmem usage in bytes " + procfsBasedProcessTree
+        .getCumulativeVmem());
+    System.out.println("Rss mem usage in bytes " + procfsBasedProcessTree
+        .getCumulativeRssmem());
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/97adb9aa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
index 85f6f1a..8c22c9e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
@@ -108,13 +108,23 @@ public abstract class ResourceCalculatorProcessTree extends Configured {
 
   /**
    * Get the CPU time in millisecond used by all the processes in the
-   * process-tree since the process-tree created
+   * process-tree since the process-tree was created
    *
    * @return cumulative CPU time in millisecond since the process-tree created
    *         return 0 if it cannot be calculated
    */
   public abstract long getCumulativeCpuTime();
 
+  /**
+   * Get the CPU usage by all the processes in the process-tree based on
+   * average between samples as a ratio of overall CPU cycles similar to top.
+   * Thus, if 2 out of 4 cores are used this should return 200.0.
+   *
+   * @return percentage CPU usage since the process-tree was created
+   *         return {@link CpuTimeTracker#UNAVAILABLE} if it cannot be calculated
+   */
+  public abstract float getCpuUsagePercent();
+
   /** Verify that the tree process id is same as its process group id.
    * @return true if the process id matches else return false.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/97adb9aa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java
index 143d236..5c3251f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java
@@ -34,7 +34,7 @@ public class WindowsBasedProcessTree extends ResourceCalculatorProcessTree {
 
   static final Log LOG = LogFactory
       .getLog(WindowsBasedProcessTree.class);
-  
+
   static class ProcessInfo {
     String pid; // process pid
     long vmem; // virtual memory
@@ -202,4 +202,9 @@ public class WindowsBasedProcessTree extends ResourceCalculatorProcessTree {
     return cpuTimeMs;
   }
 
+  @Override
+  public float getCpuUsagePercent() {
+    return CpuTimeTracker.UNAVAILABLE;
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/97adb9aa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLinuxResourceCalculatorPlugin.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLinuxResourceCalculatorPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLinuxResourceCalculatorPlugin.java
index c9a33d0..ad09fdf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLinuxResourceCalculatorPlugin.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLinuxResourceCalculatorPlugin.java
@@ -171,8 +171,8 @@ public class TestLinuxResourceCalculatorPlugin {
     updateStatFile(uTime, nTime, sTime);
     assertEquals(plugin.getCumulativeCpuTime(),
                  FAKE_JIFFY_LENGTH * (uTime + nTime + sTime));
-    assertEquals(plugin.getCpuUsage(), (float)(LinuxResourceCalculatorPlugin.UNAVAILABLE),0.0);
-    
+    assertEquals(plugin.getCpuUsage(), (float)(CpuTimeTracker.UNAVAILABLE),0.0);
+
     // Advance the time and sample again to test the CPU usage calculation
     uTime += 100L;
     plugin.advanceTime(200L);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/97adb9aa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
index 7719255..d62e21d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
@@ -236,8 +236,8 @@ public class TestProcfsBasedProcessTree {
   }
 
   protected ProcfsBasedProcessTree createProcessTree(String pid,
-      String procfsRootDir) {
-    return new ProcfsBasedProcessTree(pid, procfsRootDir);
+      String procfsRootDir, Clock clock) {
+    return new ProcfsBasedProcessTree(pid, procfsRootDir, clock);
   }
 
   protected void destroyProcessTree(String pid) throws IOException {
@@ -388,6 +388,8 @@ public class TestProcfsBasedProcessTree {
 
     // test processes
     String[] pids = { "100", "200", "300", "400" };
+    ControlledClock testClock = new ControlledClock(new SystemClock());
+    testClock.setTime(0);
     // create the fake procfs root directory.
     File procfsRootDir = new File(TEST_ROOT_DIR, "proc");
 
@@ -422,7 +424,7 @@ public class TestProcfsBasedProcessTree {
       // crank up the process tree class.
       Configuration conf = new Configuration();
       ProcfsBasedProcessTree processTree =
-          createProcessTree("100", procfsRootDir.getAbsolutePath());
+          createProcessTree("100", procfsRootDir.getAbsolutePath(), testClock);
       processTree.setConf(conf);
       // build the process tree.
       processTree.updateProcessTree();
@@ -444,6 +446,12 @@ public class TestProcfsBasedProcessTree {
               ? 7200L * ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS : 0L;
       Assert.assertEquals("Cumulative cpu time does not match", cumuCpuTime,
         processTree.getCumulativeCpuTime());
+
+      // verify CPU usage
+      Assert.assertEquals("Percent CPU time should be set to -1 initially",
+          -1.0, processTree.getCpuUsagePercent(),
+          0.01);
+
       // Check by enabling smaps
       setSmapsInProceTree(processTree, true);
       // RSS=Min(shared_dirty,PSS)+PrivateClean+PrivateDirty (exclude r-xs,
@@ -460,15 +468,31 @@ public class TestProcfsBasedProcessTree {
               "100", "200000", "200", "3000", "500" });
       writeStatFiles(procfsRootDir, pids, procInfos, memInfo);
 
+      long elapsedTimeBetweenUpdatesMsec = 200000;
+      testClock.setTime(elapsedTimeBetweenUpdatesMsec);
       // build the process tree.
       processTree.updateProcessTree();
 
       // verify cumulative cpu time again
+      long prevCumuCpuTime = cumuCpuTime;
       cumuCpuTime =
           ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS > 0
               ? 9400L * ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS : 0L;
       Assert.assertEquals("Cumulative cpu time does not match", cumuCpuTime,
         processTree.getCumulativeCpuTime());
+
+      double expectedCpuUsagePercent =
+          (ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS > 0) ?
+              (cumuCpuTime - prevCumuCpuTime) * 100.0 /
+                  elapsedTimeBetweenUpdatesMsec : 0;
+      // expectedCpuUsagePercent is given by
+      //    (94000 - 72000) * 100 / 200000,
+      // which in this case is 11. Let's verify that first.
+      Assert.assertEquals(11, expectedCpuUsagePercent, 0.001);
+      Assert.assertEquals("Percent CPU time is not correct expected " +
+              expectedCpuUsagePercent, expectedCpuUsagePercent,
+          processTree.getCpuUsagePercent(),
+          0.01);
     } finally {
       FileUtil.fullyDelete(procfsRootDir);
     }
@@ -535,7 +559,8 @@ public class TestProcfsBasedProcessTree {
 
       // crank up the process tree class.
       ProcfsBasedProcessTree processTree =
-          createProcessTree("100", procfsRootDir.getAbsolutePath());
+          createProcessTree("100", procfsRootDir.getAbsolutePath(),
+              new SystemClock());
       setSmapsInProceTree(processTree, smapEnabled);
 
       // verify cumulative memory
@@ -672,7 +697,7 @@ public class TestProcfsBasedProcessTree {
       setupProcfsRootDir(procfsRootDir);
 
       // crank up the process tree class.
-      createProcessTree(pid, procfsRootDir.getAbsolutePath());
+      createProcessTree(pid, procfsRootDir.getAbsolutePath(), new SystemClock());
 
       // Let us not create stat file for pid 100.
       Assert.assertTrue(ProcfsBasedProcessTree.checkPidPgrpidForMatch(pid,
@@ -741,7 +766,8 @@ public class TestProcfsBasedProcessTree {
       writeCmdLineFiles(procfsRootDir, pids, cmdLines);
 
       ProcfsBasedProcessTree processTree =
-          createProcessTree("100", procfsRootDir.getAbsolutePath());
+          createProcessTree("100", procfsRootDir.getAbsolutePath(),
+              new SystemClock());
       // build the process tree.
       processTree.updateProcessTree();
 

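The assertions above encode the sampling formula behind getCpuUsagePercent():
percent CPU is the growth in cumulative CPU time divided by the wall-clock
time elapsed between updates. A minimal standalone sketch with the test's
numbers, assuming a jiffy length of 10 ms for illustration:

    public class CpuUsagePercentSketch {
      public static void main(String[] args) {
        long jiffyMs = 10;                         // assumed jiffy length in ms
        long prevCumuCpuTimeMs = 7200L * jiffyMs;  // 72000 ms before the update
        long cumuCpuTimeMs = 9400L * jiffyMs;      // 94000 ms after the update
        long elapsedMs = 200000L;                  // controlled-clock interval
        double cpuUsagePercent =
            (cumuCpuTimeMs - prevCumuCpuTimeMs) * 100.0 / elapsedMs;
        System.out.println(cpuUsagePercent);       // prints 11.0
      }
    }
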
http://git-wip-us.apache.org/repos/asf/hadoop/blob/97adb9aa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestResourceCalculatorProcessTree.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestResourceCalculatorProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestResourceCalculatorProcessTree.java
index 32ceb23..eaf7e8e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestResourceCalculatorProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestResourceCalculatorProcessTree.java
@@ -53,6 +53,11 @@ public class TestResourceCalculatorProcessTree {
       return 0;
     }
 
+    @Override
+    public float getCpuUsagePercent() {
+      return CpuTimeTracker.UNAVAILABLE;
+    }
+
     public boolean checkPidPgrpidForMatch() {
       return false;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/97adb9aa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java
index 7850688..1375da8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java
@@ -42,14 +42,29 @@ import static org.apache.hadoop.metrics2.lib.Interns.info;
 @Metrics(context="container")
 public class ContainerMetrics implements MetricsSource {
 
-  public static final String PMEM_LIMIT_METRIC_NAME = "pMemLimit";
-  public static final String VMEM_LIMIT_METRIC_NAME = "vMemLimit";
+  public static final String PMEM_LIMIT_METRIC_NAME = "pMemLimitMBs";
+  public static final String VMEM_LIMIT_METRIC_NAME = "vMemLimitMBs";
   public static final String VCORE_LIMIT_METRIC_NAME = "vCoreLimit";
-  public static final String PMEM_USAGE_METRIC_NAME = "pMemUsage";
+  public static final String PMEM_USAGE_METRIC_NAME = "pMemUsageMBs";
+  private static final String PHY_CPU_USAGE_METRIC_NAME = "pCpuUsagePercent";
+
+  // Use a multiplier of 1000 to avoid losing too much precision when
+  // converting to integers
+  private static final String VCORE_USAGE_METRIC_NAME = "milliVcoreUsage";
 
   @Metric
   public MutableStat pMemMBsStat;
 
+  // This tracks overall CPU usage in terms of percentage of 1 core,
+  // similar to top.
+  // Thus if you use 2 cores completely out of 4 available cores, this
+  // value will be 200.
+  @Metric
+  public MutableStat cpuCoreUsagePercent;
+
+  @Metric
+  public MutableStat milliVcoresUsed;
+
   @Metric
   public MutableGaugeInt pMemLimitMbs;
 
@@ -57,7 +72,7 @@ public class ContainerMetrics implements MetricsSource {
   public MutableGaugeInt vMemLimitMbs;
 
   @Metric
-  public MutableGaugeInt cpuVcores;
+  public MutableGaugeInt cpuVcoreLimit;
 
   static final MetricsInfo RECORD_INFO =
       info("ContainerResource", "Resource limit and usage by container");
@@ -95,11 +110,17 @@ public class ContainerMetrics implements MetricsSource {
 
     this.pMemMBsStat = registry.newStat(
         PMEM_USAGE_METRIC_NAME, "Physical memory stats", "Usage", "MBs", true);
+    this.cpuCoreUsagePercent = registry.newStat(
+        PHY_CPU_USAGE_METRIC_NAME, "Physical Cpu core percent usage stats",
+        "Usage", "Percents", true);
+    this.milliVcoresUsed = registry.newStat(
+        VCORE_USAGE_METRIC_NAME, "1000 times Vcore usage", "Usage",
+        "MilliVcores", true);
     this.pMemLimitMbs = registry.newGauge(
         PMEM_LIMIT_METRIC_NAME, "Physical memory limit in MBs", 0);
     this.vMemLimitMbs = registry.newGauge(
         VMEM_LIMIT_METRIC_NAME, "Virtual memory limit in MBs", 0);
-    this.cpuVcores = registry.newGauge(
+    this.cpuVcoreLimit = registry.newGauge(
         VCORE_LIMIT_METRIC_NAME, "CPU limit in number of vcores", 0);
   }
 
@@ -170,6 +191,12 @@ public class ContainerMetrics implements MetricsSource {
     this.pMemMBsStat.add(memoryMBs);
   }
 
+  public void recordCpuUsage(
+      int totalPhysicalCpuPercent, int milliVcoresUsed) {
+    this.cpuCoreUsagePercent.add(totalPhysicalCpuPercent);
+    this.milliVcoresUsed.add(milliVcoresUsed);
+  }
+
   public void recordProcessId(String processId) {
     registry.tag(PROCESSID_INFO, processId);
   }
@@ -177,7 +204,7 @@ public class ContainerMetrics implements MetricsSource {
   public void recordResourceLimit(int vmemLimit, int pmemLimit, int cpuVcores) {
     this.vMemLimitMbs.set(vmemLimit);
     this.pMemLimitMbs.set(pmemLimit);
-    this.cpuVcores.set(cpuVcores);
+    this.cpuVcoreLimit.set(cpuVcores);
   }
 
   private synchronized void scheduleTimerTaskIfRequired() {

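The 1000x multiplier behind milliVcoreUsage exists because fractional vcore
usage has to pass through integer-valued metrics. A small sketch of the
precision being preserved, with a hypothetical usage value:

    public class MilliVcoreSketch {
      public static void main(String[] args) {
        float vcoresUsed = 0.237f;                    // hypothetical usage
        int truncated = (int) vcoresUsed;             // 0 -- precision lost
        int milliVcores = (int) (vcoresUsed * 1000);  // 237 -- precision kept
        System.out.println(truncated + " vs " + milliVcores);
      }
    }
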
http://git-wip-us.apache.org/repos/asf/hadoop/blob/97adb9aa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
index 2cecda6..b587e46 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerKillEvent;
+import org.apache.hadoop.yarn.server.nodemanager.util.NodeManagerHardwareUtils;
 import org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree;
 import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
 
@@ -75,6 +76,7 @@ public class ContainersMonitorImpl extends AbstractService implements
   private long maxVCoresAllottedForContainers;
 
   private static final long UNKNOWN_MEMORY_LIMIT = -1L;
+  private int nodeCpuPercentageForYARN;
 
   public ContainersMonitorImpl(ContainerExecutor exec,
       AsyncDispatcher dispatcher, Context context) {
@@ -145,6 +147,9 @@ public class ContainersMonitorImpl extends AbstractService implements
     LOG.info("Physical memory check enabled: " + pmemCheckEnabled);
     LOG.info("Virtual memory check enabled: " + vmemCheckEnabled);
 
+    nodeCpuPercentageForYARN =
+        NodeManagerHardwareUtils.getNodeCpuPercentage(conf);
+
     if (pmemCheckEnabled) {
       // Logging if actual pmem cannot be determined.
       long totalPhysicalMemoryOnNM = UNKNOWN_MEMORY_LIMIT;
@@ -434,6 +439,16 @@ public class ContainersMonitorImpl extends AbstractService implements
             pTree.updateProcessTree();    // update process-tree
             long currentVmemUsage = pTree.getCumulativeVmem();
             long currentPmemUsage = pTree.getCumulativeRssmem();
+            // if the machine has 6 cores and 3 are fully used,
+            // cpuUsagePercentPerCore should be 300% and
+            // cpuUsageTotalCoresPercentage should be 50%
+            float cpuUsagePercentPerCore = pTree.getCpuUsagePercent();
+            float cpuUsageTotalCoresPercentage = cpuUsagePercentPerCore /
+                resourceCalculatorPlugin.getNumProcessors();
+
+            // Multiply by 1000 to avoid losing data when converting to int
+            int milliVcoresUsed = (int) (cpuUsageTotalCoresPercentage * 1000
+                * maxVCoresAllottedForContainers / nodeCpuPercentageForYARN);
             // as processes begin with an age 1, we want to see if there
             // are processes more than 1 iteration old.
             long curMemUsageOfAgedProcesses = pTree.getCumulativeVmem(1);
@@ -451,6 +466,9 @@ public class ContainersMonitorImpl extends AbstractService implements
               ContainerMetrics.forContainer(
                   containerId, containerMetricsPeriodMs).recordMemoryUsage(
                   (int) (currentPmemUsage >> 20));
+              ContainerMetrics.forContainer(
+                  containerId, containerMetricsPeriodMs).recordCpuUsage(
+                  (int) cpuUsagePercentPerCore, milliVcoresUsed);
             }
 
             boolean isMemoryOverLimit = false;

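Plugging assumed numbers into the calculation above makes the scaling easier
to follow (a sketch, not part of the patch):

    public class MilliVcoresUsedSketch {
      public static void main(String[] args) {
        float cpuUsagePercentPerCore = 300f;  // 3 of 6 cores fully busy
        int numProcessors = 6;
        long maxVCoresAllottedForContainers = 8;
        int nodeCpuPercentageForYARN = 100;   // percent of CPU given to YARN
        float cpuUsageTotalCoresPercentage =
            cpuUsagePercentPerCore / numProcessors;             // 50%
        int milliVcoresUsed = (int) (cpuUsageTotalCoresPercentage * 1000
            * maxVCoresAllottedForContainers / nodeCpuPercentageForYARN);
        System.out.println(milliVcoresUsed);  // prints 4000, i.e. 4 vcores
      }
    }
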
http://git-wip-us.apache.org/repos/asf/hadoop/blob/97adb9aa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/NodeManagerHardwareUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/NodeManagerHardwareUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/NodeManagerHardwareUtils.java
index 07cf698..431cf5d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/NodeManagerHardwareUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/NodeManagerHardwareUtils.java
@@ -59,6 +59,19 @@ public class NodeManagerHardwareUtils {
   public static float getContainersCores(ResourceCalculatorPlugin plugin,
       Configuration conf) {
     int numProcessors = plugin.getNumProcessors();
+    int nodeCpuPercentage = getNodeCpuPercentage(conf);
+
+    return (nodeCpuPercentage * numProcessors) / 100.0f;
+  }
+
+  /**
+   * Gets the percentage of physical CPU that is configured for YARN
+   * containers. This is a percent > 0 and <= 100, based on
+   * YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT.
+   * @param conf Configuration object
+   * @return percent > 0 and <= 100
+   */
+  public static int getNodeCpuPercentage(Configuration conf) {
     int nodeCpuPercentage =
         Math.min(conf.getInt(
           YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT,
@@ -73,7 +86,6 @@ public class NodeManagerHardwareUtils {
               + ". Value cannot be less than or equal to 0.";
       throw new IllegalArgumentException(message);
     }
-
-    return (nodeCpuPercentage * numProcessors) / 100.0f;
+    return nodeCpuPercentage;
   }
 }

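With the refactoring above, the containers-cores calculation reduces to one
expression. For example, on an assumed 8-processor node with the limit set
to 75 percent:

    public class ContainersCoresSketch {
      public static void main(String[] args) {
        int numProcessors = 8;       // would come from the plugin
        int nodeCpuPercentage = 75;  // would come from the configuration
        float containerCores = (nodeCpuPercentage * numProcessors) / 100.0f;
        System.out.println(containerCores);  // prints 6.0
      }
    }
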

[14/50] [abbrv] hadoop git commit: Move HADOOP-6857 to 3.0.0.

Posted by ji...@apache.org.
Move HADOOP-6857 to 3.0.0.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7814f50e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7814f50e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7814f50e

Branch: refs/heads/HDFS-7285
Commit: 7814f50e2254460602bc00c81313d07678335451
Parents: 0ac995e
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue Mar 3 21:52:37 2015 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:23 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7814f50e/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index cb5cd4d..d518d9f 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -13,6 +13,9 @@ Trunk (Unreleased)
 
     HADOOP-10950. rework heap management vars (John Smith via aw)
 
+    HADOOP-6857. FsShell should report raw disk usage including replication
+    factor. (Byron Wong via shv)
+
     HADOOP-11657. Align the output of `hadoop fs -du` to be more Unix-like.
     (aajisaka)
 
@@ -465,9 +468,6 @@ Release 2.7.0 - UNRELEASED
 
     HADOOP-10748. HttpServer2 should not load JspServlet. (wheat9)
 
-    HADOOP-6857. FsShell should report raw disk usage including replication
-    factor. (Byron Wong via shv)
-
     HADOOP-10847. Remove the usage of sun.security.x509.* in testing code.
     (Pascal Oliva via wheat9)
 


[36/50] [abbrv] hadoop git commit: HDFS-6488. Support HDFS superuser in NFSv3 gateway. Contributed by Brandon Li

Posted by ji...@apache.org.
HDFS-6488. Support HDFS superuser in NFSv3 gateway. Contributed by Brandon Li


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2f91d9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2f91d9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2f91d9b

Branch: refs/heads/HDFS-7285
Commit: a2f91d9b0c5131b2f76d4852e9c649e446b873bf
Parents: dfc015f
Author: Brandon Li <br...@apache.org>
Authored: Fri Mar 6 15:19:45 2015 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:26 2015 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/nfs/conf/NfsConfigKeys.java     | 14 +++++++
 .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java    | 12 +++++-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  2 +
 .../src/site/markdown/HdfsNfsGateway.md         | 44 +++++++++++++++++---
 4 files changed, 64 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2f91d9b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
index 9e4aaf5..09ee579 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
@@ -73,4 +73,18 @@ public class NfsConfigKeys {
   
   public static final String  NFS_METRICS_PERCENTILES_INTERVALS_KEY = "nfs.metrics.percentiles.intervals";
   public static final String  NFS_METRICS_PERCENTILES_INTERVALS_DEFAULT = "";
+  
+  /*
+   * The HDFS super-user is the user with the same identity as the NameNode
+   * process itself; permission checks never fail for the super-user. If the
+   * following property is configured, the superuser on the NFS client can
+   * access any file on HDFS. By default, no superuser is configured in the
+   * gateway. Note that, even if the superuser is configured,
+   * "nfs.exports.allowed.hosts" still takes effect. For example, the
+   * superuser will not have write access to HDFS files through the gateway
+   * if the NFS client host is not allowed to have write access in
+   * "nfs.exports.allowed.hosts".
+   */
+  public static final String  NFS_SUPERUSER_KEY = "nfs.superuser";
+  public static final String  NFS_SUPERUSER_DEFAULT = "";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2f91d9b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index 05d0674..268abba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException;
 import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileSystem.Statistics;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.Options;
@@ -166,6 +165,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
   private JvmPauseMonitor pauseMonitor;
   private Nfs3HttpServer infoServer = null;
   static Nfs3Metrics metrics;
+  private String superuser;
 
   public RpcProgramNfs3(NfsConfiguration config, DatagramSocket registrationSocket,
       boolean allowInsecurePorts) throws IOException {
@@ -200,6 +200,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     UserGroupInformation.setConfiguration(config);
     SecurityUtil.login(config, NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY,
         NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY);
+    superuser = config.get(NfsConfigKeys.NFS_SUPERUSER_KEY,
+        NfsConfigKeys.NFS_SUPERUSER_DEFAULT);
+    LOG.info("Configured HDFS superuser is " + superuser);
 
     if (!enableDump) {
       writeDumpDir = null;
@@ -583,13 +586,18 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     }
 
     try {
-      // HDFS-5804 removed supserUserClient access
       attrs = writeManager.getFileAttr(dfsClient, handle, iug);
 
       if (attrs == null) {
         LOG.error("Can't get path for fileId: " + handle.getFileId());
         return new ACCESS3Response(Nfs3Status.NFS3ERR_STALE);
       }
+      if(iug.getUserName(securityHandler.getUid(), "unknown").equals(superuser)) {
+        int access = Nfs3Constant.ACCESS3_LOOKUP | Nfs3Constant.ACCESS3_DELETE
+            | Nfs3Constant.ACCESS3_EXECUTE | Nfs3Constant.ACCESS3_EXTEND
+            | Nfs3Constant.ACCESS3_MODIFY | Nfs3Constant.ACCESS3_READ;
+        return new ACCESS3Response(Nfs3Status.NFS3_OK, attrs, access);
+      }
       int access = Nfs3Utils.getAccessRightsForUserGroup(
           securityHandler.getUid(), securityHandler.getGid(),
           securityHandler.getAuxGids(), attrs);

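The access value granted to the configured superuser above is the OR of all
six NFSv3 ACCESS bits. As a sketch, using the bit values RFC 1813 assigns to
these operations (assumed here to match the Nfs3Constant definitions):

    public class Nfs3AccessMaskSketch {
      public static void main(String[] args) {
        int access = 0x0001 /* READ   */ | 0x0002 /* LOOKUP */
                   | 0x0004 /* MODIFY */ | 0x0008 /* EXTEND */
                   | 0x0010 /* DELETE */ | 0x0020 /* EXECUTE */;
        System.out.printf("0x%02x%n", access);  // prints 0x3f: full access
      }
    }
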
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2f91d9b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b443902..29717e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -348,6 +348,8 @@ Release 2.7.0 - UNRELEASED
 
     HDFS-7656. Expose truncate API for HDFS httpfs. (yliu)
 
+    HDFS-6488. Support HDFS superuser in NFS gateway. (brandonli)
+
   IMPROVEMENTS
 
     HDFS-7752. Improve description for

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2f91d9b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
index cea491f..e6666d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
@@ -80,14 +80,33 @@ The above are the only required configuration for the NFS gateway in non-secure
 
 The rest of the NFS gateway configurations are optional for both secure and non-secure mode.
 
-The AIX NFS client has a [few known issues](https://issues.apache.org/jira/browse/HDFS-6549) that prevent it from working correctly by default with the HDFS NFS Gateway. If you want to be able to access the HDFS NFS Gateway from AIX, you should set the following configuration setting to enable work-arounds for these issues:
+*   The AIX NFS client has a [few known issues](https://issues.apache.org/jira/browse/HDFS-6549)
+    that prevent it from working correctly by default with the HDFS NFS Gateway. If you want to
+    be able to access the HDFS NFS Gateway from AIX, you should set the following configuration
+    setting to enable work-arounds for these issues:
 
-    <property>
-      <name>nfs.aix.compatibility.mode.enabled</name>
-      <value>true</value>
-    </property>
+        <property>
+          <name>nfs.aix.compatibility.mode.enabled</name>
+          <value>true</value>
+        </property>
+
+    Note that regular, non-AIX clients should NOT enable AIX compatibility mode. The work-arounds
+    implemented by AIX compatibility mode effectively disable safeguards to ensure that listing
+    of directory contents via NFS returns consistent results, and that all data sent to the NFS
+    server can be assured to have been committed.
+
+*   The HDFS super-user is the user with the same identity as the NameNode process
+    itself; permission checks never fail for the super-user.
+    If the following property is configured, the superuser on the NFS client can access
+    any file on HDFS. By default, no superuser is configured in the gateway.
+    Note that, even if the superuser is configured, "nfs.exports.allowed.hosts" still takes effect.
+    For example, the superuser will not have write access to HDFS files through the gateway if
+    the NFS client host is not allowed to have write access in "nfs.exports.allowed.hosts".
 
-Note that regular, non-AIX clients should NOT enable AIX compatibility mode. The work-arounds implemented by AIX compatibility mode effectively disable safeguards to ensure that listing of directory contents via NFS returns consistent results, and that all data sent to the NFS server can be assured to have been committed.
+        <property>
+          <name>nfs.superuser</name>
+          <value>the_name_of_hdfs_superuser</value>
+        </property>
 
 It's strongly recommended for the users to update a few configuration properties based on their use cases. All the following configuration properties can be added or updated in hdfs-site.xml.
 
@@ -135,6 +154,19 @@ It's strongly recommended for the users to update a few configuration properties
           <value>* rw</value>
         </property>
 
+*   The HDFS super-user is the user with the same identity as the NameNode process
+    itself; permission checks never fail for the super-user.
+    If the following property is configured, the superuser on the NFS client can access
+    any file on HDFS. By default, no superuser is configured in the gateway.
+    Note that, even if the superuser is configured, "nfs.exports.allowed.hosts" still takes effect.
+    For example, the superuser will not have write access to HDFS files through the gateway if
+    the NFS client host is not allowed to have write access in "nfs.exports.allowed.hosts".
+
+        <property>
+          <name>nfs.superuser</name>
+          <value>the_name_of_hdfs_superuser</value>
+        </property>
+
 *   JVM and log settings. You can export JVM settings (e.g., heap size and GC log) in
     HADOOP\_NFS3\_OPTS. More NFS related settings can be found in hadoop-env.sh.
     To get NFS debug trace, you can edit the log4j.property file


[09/50] [abbrv] hadoop git commit: YARN-3131. YarnClientImpl should check FAILED and KILLED state in submitApplication. Contributed by Chang Li

Posted by ji...@apache.org.
YARN-3131. YarnClientImpl should check FAILED and KILLED state in submitApplication. Contributed by Chang Li


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bf3604b5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bf3604b5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bf3604b5

Branch: refs/heads/HDFS-7285
Commit: bf3604b53af4243c0ea0b4fd3ef398c2b7eaf450
Parents: aca0abe
Author: Jason Lowe <jl...@apache.org>
Authored: Wed Mar 4 18:04:22 2015 +0000
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:23 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 ++
 .../yarn/client/api/impl/YarnClientImpl.java    | 19 +++++--
 .../hadoop/yarn/client/ProtocolHATestBase.java  |  2 +-
 .../yarn/client/api/impl/TestYarnClient.java    | 55 ++++++++++++++++++--
 4 files changed, 68 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf3604b5/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 5eaf4f4..03bb20b 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -692,6 +692,9 @@ Release 2.7.0 - UNRELEASED
     YARN-3265. Fixed a deadlock in CapacityScheduler by always passing a queue's
     available resource-limit from the parent queue. (Wangda Tan via vinodkv)
 
+    YARN-3131. YarnClientImpl should check FAILED and KILLED state in
+    submitApplication (Chang Li via jlowe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf3604b5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
index 6acf7d8..d6b36bb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
@@ -254,13 +254,22 @@ public class YarnClientImpl extends YarnClient {
 
     int pollCount = 0;
     long startTime = System.currentTimeMillis();
-
+    EnumSet<YarnApplicationState> waitingStates = 
+                                 EnumSet.of(YarnApplicationState.NEW,
+                                 YarnApplicationState.NEW_SAVING,
+                                 YarnApplicationState.SUBMITTED);
+    EnumSet<YarnApplicationState> failToSubmitStates = 
+                                  EnumSet.of(YarnApplicationState.FAILED,
+                                  YarnApplicationState.KILLED);
     while (true) {
       try {
-        YarnApplicationState state =
-            getApplicationReport(applicationId).getYarnApplicationState();
-        if (!state.equals(YarnApplicationState.NEW) &&
-            !state.equals(YarnApplicationState.NEW_SAVING)) {
+        ApplicationReport appReport = getApplicationReport(applicationId);
+        YarnApplicationState state = appReport.getYarnApplicationState();
+        if (!waitingStates.contains(state)) {
+          if(failToSubmitStates.contains(state)) {
+            throw new YarnException("Failed to submit " + applicationId + 
+                " to YARN : " + appReport.getDiagnostics());
+          }
           LOG.info("Submitted application " + applicationId);
           break;
         }

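Condensed, the new loop polls until the application leaves the waiting
states and treats FAILED and KILLED as submission errors rather than
success. A sketch as a hypothetical helper (the names waitForSubmission and
pollIntervalMs are illustrative, not part of the patch):

    static void waitForSubmission(YarnClient yarnClient, ApplicationId appId,
        long pollIntervalMs) throws Exception {
      EnumSet<YarnApplicationState> waitingStates = EnumSet.of(
          YarnApplicationState.NEW, YarnApplicationState.NEW_SAVING,
          YarnApplicationState.SUBMITTED);
      EnumSet<YarnApplicationState> failToSubmitStates = EnumSet.of(
          YarnApplicationState.FAILED, YarnApplicationState.KILLED);
      while (true) {
        ApplicationReport report = yarnClient.getApplicationReport(appId);
        YarnApplicationState state = report.getYarnApplicationState();
        if (!waitingStates.contains(state)) {
          if (failToSubmitStates.contains(state)) {
            throw new YarnException("Failed to submit " + appId
                + " to YARN : " + report.getDiagnostics());
          }
          break;  // ACCEPTED, RUNNING, or a terminal success state
        }
        Thread.sleep(pollIntervalMs);
      }
    }
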
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf3604b5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
index da7d505..782bc43 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
@@ -646,7 +646,7 @@ public abstract class ProtocolHATestBase extends ClientBaseWithFixes {
       ApplicationReport report =
           ApplicationReport.newInstance(appId, attemptId, "fakeUser",
               "fakeQueue", "fakeApplicationName", "localhost", 0, null,
-              YarnApplicationState.FAILED, "fake an application report", "",
+              YarnApplicationState.FINISHED, "fake an application report", "",
               1000l, 1200l, FinalApplicationStatus.FAILED, null, "", 50f,
               "fakeApplicationType", null);
       return report;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf3604b5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index 7e97134..9946506 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -157,12 +157,9 @@ public class TestYarnClient {
 
     YarnApplicationState[] exitStates = new YarnApplicationState[]
         {
-          YarnApplicationState.SUBMITTED,
           YarnApplicationState.ACCEPTED,
           YarnApplicationState.RUNNING,
-          YarnApplicationState.FINISHED,
-          YarnApplicationState.FAILED,
-          YarnApplicationState.KILLED
+          YarnApplicationState.FINISHED
         };
 
     // Submit an application without ApplicationId provided
@@ -203,6 +200,54 @@ public class TestYarnClient {
     client.stop();
   }
 
+  @Test (timeout = 30000)
+  public void testSubmitIncorrectQueue() throws IOException {
+    MiniYARNCluster cluster = new MiniYARNCluster("testMRAMTokens", 1, 1, 1);
+    YarnClient rmClient = null;
+    try {
+      cluster.init(new YarnConfiguration());
+      cluster.start();
+      final Configuration yarnConf = cluster.getConfig();
+      rmClient = YarnClient.createYarnClient();
+      rmClient.init(yarnConf);
+      rmClient.start();
+      YarnClientApplication newApp = rmClient.createApplication();
+
+      ApplicationId appId = newApp.getNewApplicationResponse().getApplicationId();
+
+      // Create launch context for app master
+      ApplicationSubmissionContext appContext
+        = Records.newRecord(ApplicationSubmissionContext.class);
+
+      // set the application id
+      appContext.setApplicationId(appId);
+
+      // set the application name
+      appContext.setApplicationName("test");
+
+      // Set the queue to which this application is to be submitted in the RM
+      appContext.setQueue("nonexist");
+
+      // Set up the container launch context for the application master
+      ContainerLaunchContext amContainer
+        = Records.newRecord(ContainerLaunchContext.class);
+      appContext.setAMContainerSpec(amContainer);
+      appContext.setResource(Resource.newInstance(1024, 1));
+      // appContext.setUnmanagedAM(unmanaged);
+
+      // Submit the application to the applications manager
+      rmClient.submitApplication(appContext);
+      Assert.fail("Job submission should have thrown an exception");
+    } catch (YarnException e) {
+      Assert.assertTrue(e.getMessage().contains("Failed to submit"));
+    } finally {
+      if (rmClient != null) {
+        rmClient.stop();
+      }
+      cluster.stop();
+    }
+  }
+  
   @Test
   public void testKillApplication() throws Exception {
     MockRM rm = new MockRM();
@@ -998,7 +1043,7 @@ public class TestYarnClient {
       public ApplicationReport getApplicationReport(ApplicationId appId) {
         ApplicationReport report = mock(ApplicationReport.class);
         when(report.getYarnApplicationState())
-            .thenReturn(YarnApplicationState.SUBMITTED);
+            .thenReturn(YarnApplicationState.RUNNING);
         return report;
       }
 


[43/50] [abbrv] hadoop git commit: HDFS-7857. Improve authentication failure WARN message to avoid user confusion. Contributed by Yongjun Zhang.

Posted by ji...@apache.org.
HDFS-7857. Improve authentication failure WARN message to avoid user confusion. Contributed by Yongjun Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/129f88a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/129f88a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/129f88a7

Branch: refs/heads/HDFS-7285
Commit: 129f88a7aaef3b4db549570e2784d2daf432feea
Parents: 1040f70
Author: Yongjun Zhang <yz...@cloudera.com>
Authored: Sun Mar 8 20:39:46 2015 -0700
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:17:55 2015 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/ipc/Server.java         | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/129f88a7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 893e0eb..d2d61b3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -1324,10 +1324,15 @@ public abstract class Server {
           saslResponse = processSaslMessage(saslMessage);
         } catch (IOException e) {
           rpcMetrics.incrAuthenticationFailures();
+          if (LOG.isDebugEnabled()) {
+            LOG.debug(StringUtils.stringifyException(e));
+          }
           // attempting user could be null
+          IOException tce = (IOException) getTrueCause(e);
           AUDITLOG.warn(AUTH_FAILED_FOR + this.toString() + ":"
-              + attemptingUser + " (" + e.getLocalizedMessage() + ")");
-          throw (IOException) getTrueCause(e);
+              + attemptingUser + " (" + e.getLocalizedMessage()
+              + ") with true cause: (" + tce.getLocalizedMessage() + ")");
+          throw tce;
         }
         
         if (saslServer != null && saslServer.isComplete()) {

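The fix surfaces the underlying cause in the audit WARN instead of only the
top-level SASL wrapper message. Conceptually, getTrueCause walks the
exception chain; a simplified sketch (the real method also inspects
specific exception types along the way):

    static Throwable innermostCause(Throwable e) {
      Throwable cause = e;
      while (cause.getCause() != null) {
        cause = cause.getCause();  // unwrap to the innermost exception
      }
      return cause;
    }
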

[22/50] [abbrv] hadoop git commit: MAPREDUCE-6267. Refactor JobSubmitter#copyAndConfigureFiles into it's own class. (Chris Trezzo via kasha)

Posted by ji...@apache.org.
MAPREDUCE-6267. Refactor JobSubmitter#copyAndConfigureFiles into it's own class. (Chris Trezzo via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ba4d888d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ba4d888d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ba4d888d

Branch: refs/heads/HDFS-7285
Commit: ba4d888d037125e1434bd6d238c41041b2beefa7
Parents: 39535ec
Author: Karthik Kambatla <ka...@apache.org>
Authored: Wed Mar 4 14:42:07 2015 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:24 2015 -0700

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt            |   3 +
 .../java/org/apache/hadoop/mapreduce/Job.java   |   1 +
 .../hadoop/mapreduce/JobResourceUploader.java   | 363 +++++++++++++++++++
 .../apache/hadoop/mapreduce/JobSubmitter.java   | 312 +---------------
 4 files changed, 370 insertions(+), 309 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba4d888d/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index b2ae9d9..212727e 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -323,6 +323,9 @@ Release 2.7.0 - UNRELEASED
     MAPREDUCE-6248. Exposed the internal MapReduce job's information as a public
     API in DistCp. (Jing Zhao via vinodkv)
 
+    MAPREDUCE-6267. Refactor JobSubmitter#copyAndConfigureFiles into it's own 
+    class. (Chris Trezzo via kasha)
+
   OPTIMIZATIONS
 
     MAPREDUCE-6169. MergeQueue should release reference to the current item 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba4d888d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
index f404175..9eea4cc 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
@@ -98,6 +98,7 @@ public class Job extends JobContextImpl implements JobContext {
     "mapreduce.client.genericoptionsparser.used";
   public static final String SUBMIT_REPLICATION = 
     "mapreduce.client.submit.file.replication";
+  public static final int DEFAULT_SUBMIT_REPLICATION = 10;
 
   @InterfaceStability.Evolving
   public static enum TaskStatusFilter { NONE, KILLED, FAILED, SUCCEEDED, ALL }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba4d888d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
new file mode 100644
index 0000000..eebdf88
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
@@ -0,0 +1,363 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.UnknownHostException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.mapreduce.filecache.ClientDistributedCacheManager;
+import org.apache.hadoop.mapreduce.filecache.DistributedCache;
+
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+class JobResourceUploader {
+  protected static final Log LOG = LogFactory.getLog(JobResourceUploader.class);
+  private FileSystem jtFs;
+
+  JobResourceUploader(FileSystem submitFs) {
+    this.jtFs = submitFs;
+  }
+
+  /**
+   * Upload and configure files, libjars, jobjars, and archives pertaining to
+   * the passed job.
+   * 
+   * @param job the job containing the files to be uploaded
+   * @param submitJobDir the submission directory of the job
+   * @throws IOException
+   */
+  public void uploadFiles(Job job, Path submitJobDir) throws IOException {
+    Configuration conf = job.getConfiguration();
+    short replication =
+        (short) conf.getInt(Job.SUBMIT_REPLICATION,
+            Job.DEFAULT_SUBMIT_REPLICATION);
+
+    if (!(conf.getBoolean(Job.USED_GENERIC_PARSER, false))) {
+      LOG.warn("Hadoop command-line option parsing not performed. "
+          + "Implement the Tool interface and execute your application "
+          + "with ToolRunner to remedy this.");
+    }
+
+    // get all the command line arguments passed in by the user conf
+    String files = conf.get("tmpfiles");
+    String libjars = conf.get("tmpjars");
+    String archives = conf.get("tmparchives");
+    String jobJar = job.getJar();
+
+    //
+    // Figure out what fs the JobTracker is using. Copy the
+    // job to it, under a temporary name. This allows DFS to work,
+    // and under the local fs also provides UNIX-like object loading
+    // semantics. (that is, if the job file is deleted right after
+    // submission, we can still run the submission to completion)
+    //
+
+    // Create a number of filenames in the JobTracker's fs namespace
+    LOG.debug("default FileSystem: " + jtFs.getUri());
+    if (jtFs.exists(submitJobDir)) {
+      throw new IOException("Not submitting job. Job directory " + submitJobDir
+          + " already exists! This is unexpected. Please check what's there in"
+          + " that directory");
+    }
+    submitJobDir = jtFs.makeQualified(submitJobDir);
+    submitJobDir = new Path(submitJobDir.toUri().getPath());
+    FsPermission mapredSysPerms =
+        new FsPermission(JobSubmissionFiles.JOB_DIR_PERMISSION);
+    FileSystem.mkdirs(jtFs, submitJobDir, mapredSysPerms);
+    Path filesDir = JobSubmissionFiles.getJobDistCacheFiles(submitJobDir);
+    Path archivesDir = JobSubmissionFiles.getJobDistCacheArchives(submitJobDir);
+    Path libjarsDir = JobSubmissionFiles.getJobDistCacheLibjars(submitJobDir);
+    // add all the command line files/ jars and archive
+    // first copy them to jobtrackers filesystem
+
+    if (files != null) {
+      FileSystem.mkdirs(jtFs, filesDir, mapredSysPerms);
+      String[] fileArr = files.split(",");
+      for (String tmpFile : fileArr) {
+        URI tmpURI = null;
+        try {
+          tmpURI = new URI(tmpFile);
+        } catch (URISyntaxException e) {
+          throw new IllegalArgumentException(e);
+        }
+        Path tmp = new Path(tmpURI);
+        Path newPath = copyRemoteFiles(filesDir, tmp, conf, replication);
+        try {
+          URI pathURI = getPathURI(newPath, tmpURI.getFragment());
+          DistributedCache.addCacheFile(pathURI, conf);
+        } catch (URISyntaxException ue) {
+          // should not throw a URI exception
+          throw new IOException("Failed to create uri for " + tmpFile, ue);
+        }
+      }
+    }
+
+    if (libjars != null) {
+      FileSystem.mkdirs(jtFs, libjarsDir, mapredSysPerms);
+      String[] libjarsArr = libjars.split(",");
+      for (String tmpjars : libjarsArr) {
+        Path tmp = new Path(tmpjars);
+        Path newPath = copyRemoteFiles(libjarsDir, tmp, conf, replication);
+        DistributedCache.addFileToClassPath(
+            new Path(newPath.toUri().getPath()), conf);
+      }
+    }
+
+    if (archives != null) {
+      FileSystem.mkdirs(jtFs, archivesDir, mapredSysPerms);
+      String[] archivesArr = archives.split(",");
+      for (String tmpArchives : archivesArr) {
+        URI tmpURI;
+        try {
+          tmpURI = new URI(tmpArchives);
+        } catch (URISyntaxException e) {
+          throw new IllegalArgumentException(e);
+        }
+        Path tmp = new Path(tmpURI);
+        Path newPath = copyRemoteFiles(archivesDir, tmp, conf, replication);
+        try {
+          URI pathURI = getPathURI(newPath, tmpURI.getFragment());
+          DistributedCache.addCacheArchive(pathURI, conf);
+        } catch (URISyntaxException ue) {
+          // should not throw a URI exception
+          throw new IOException("Failed to create uri for " + tmpArchives, ue);
+        }
+      }
+    }
+
+    if (jobJar != null) { // copy jar to JobTracker's fs
+      // use jar name if job is not named.
+      if ("".equals(job.getJobName())) {
+        job.setJobName(new Path(jobJar).getName());
+      }
+      Path jobJarPath = new Path(jobJar);
+      URI jobJarURI = jobJarPath.toUri();
+      // If the job jar is already in a global fs,
+      // we don't need to copy it from local fs
+      if (jobJarURI.getScheme() == null || jobJarURI.getScheme().equals("file")) {
+        copyJar(jobJarPath, JobSubmissionFiles.getJobJar(submitJobDir),
+            replication);
+        job.setJar(JobSubmissionFiles.getJobJar(submitJobDir).toString());
+      }
+    } else {
+      LOG.warn("No job jar file set.  User classes may not be found. "
+          + "See Job or Job#setJar(String).");
+    }
+
+    addLog4jToDistributedCache(job, submitJobDir);
+
+    // set the timestamps of the archives and files
+    // set the public/private visibility of the archives and files
+    ClientDistributedCacheManager.determineTimestampsAndCacheVisibilities(conf);
+    // get DelegationToken for cached file
+    ClientDistributedCacheManager.getDelegationTokens(conf,
+        job.getCredentials());
+  }
+
+  // copies a file to the jobtracker filesystem and returns the path where it
+  // was copied to
+  private Path copyRemoteFiles(Path parentDir, Path originalPath,
+      Configuration conf, short replication) throws IOException {
+    // check if we do not need to copy the files
+    // is jt using the same file system.
+    // just checking for uri strings... doing no dns lookups
+    // to see if the filesystems are the same. This is not optimal.
+    // but avoids name resolution.
+
+    FileSystem remoteFs = null;
+    remoteFs = originalPath.getFileSystem(conf);
+    if (compareFs(remoteFs, jtFs)) {
+      return originalPath;
+    }
+    // this might have name collisions. copy will throw an exception
+    // parse the original path to create new path
+    Path newPath = new Path(parentDir, originalPath.getName());
+    FileUtil.copy(remoteFs, originalPath, jtFs, newPath, false, conf);
+    jtFs.setReplication(newPath, replication);
+    return newPath;
+  }
+
+  /*
+   * see if two file systems are the same or not.
+   */
+  private boolean compareFs(FileSystem srcFs, FileSystem destFs) {
+    URI srcUri = srcFs.getUri();
+    URI dstUri = destFs.getUri();
+    if (srcUri.getScheme() == null) {
+      return false;
+    }
+    if (!srcUri.getScheme().equals(dstUri.getScheme())) {
+      return false;
+    }
+    String srcHost = srcUri.getHost();
+    String dstHost = dstUri.getHost();
+    if ((srcHost != null) && (dstHost != null)) {
+      try {
+        srcHost = InetAddress.getByName(srcHost).getCanonicalHostName();
+        dstHost = InetAddress.getByName(dstHost).getCanonicalHostName();
+      } catch (UnknownHostException ue) {
+        return false;
+      }
+      if (!srcHost.equals(dstHost)) {
+        return false;
+      }
+    } else if (srcHost == null && dstHost != null) {
+      return false;
+    } else if (srcHost != null && dstHost == null) {
+      return false;
+    }
+    // check for ports
+    if (srcUri.getPort() != dstUri.getPort()) {
+      return false;
+    }
+    return true;
+  }
+
+  private void copyJar(Path originalJarPath, Path submitJarFile,
+      short replication) throws IOException {
+    jtFs.copyFromLocalFile(originalJarPath, submitJarFile);
+    jtFs.setReplication(submitJarFile, replication);
+    jtFs.setPermission(submitJarFile, new FsPermission(
+        JobSubmissionFiles.JOB_FILE_PERMISSION));
+  }
+
+  private void addLog4jToDistributedCache(Job job, Path jobSubmitDir)
+      throws IOException {
+    Configuration conf = job.getConfiguration();
+    String log4jPropertyFile =
+        conf.get(MRJobConfig.MAPREDUCE_JOB_LOG4J_PROPERTIES_FILE, "");
+    if (!log4jPropertyFile.isEmpty()) {
+      short replication = (short) conf.getInt(Job.SUBMIT_REPLICATION, 10);
+      copyLog4jPropertyFile(job, jobSubmitDir, replication);
+    }
+  }
+
+  private URI getPathURI(Path destPath, String fragment)
+      throws URISyntaxException {
+    URI pathURI = destPath.toUri();
+    if (pathURI.getFragment() == null) {
+      if (fragment == null) {
+        pathURI = new URI(pathURI.toString() + "#" + destPath.getName());
+      } else {
+        pathURI = new URI(pathURI.toString() + "#" + fragment);
+      }
+    }
+    return pathURI;
+  }
+
+  // copy user specified log4j.property file in local
+  // to HDFS with putting on distributed cache and adding its parent directory
+  // to classpath.
+  @SuppressWarnings("deprecation")
+  private void copyLog4jPropertyFile(Job job, Path submitJobDir,
+      short replication) throws IOException {
+    Configuration conf = job.getConfiguration();
+
+    String file =
+        validateFilePath(
+            conf.get(MRJobConfig.MAPREDUCE_JOB_LOG4J_PROPERTIES_FILE), conf);
+    LOG.debug("default FileSystem: " + jtFs.getUri());
+    FsPermission mapredSysPerms =
+        new FsPermission(JobSubmissionFiles.JOB_DIR_PERMISSION);
+    if (!jtFs.exists(submitJobDir)) {
+      throw new IOException("Cannot find job submission directory! "
+          + "It should have just been created, so something is wrong here.");
+    }
+
+    Path fileDir = JobSubmissionFiles.getJobLog4jFile(submitJobDir);
+
+    // first copy local log4j.properties file to HDFS under submitJobDir
+    if (file != null) {
+      FileSystem.mkdirs(jtFs, fileDir, mapredSysPerms);
+      URI tmpURI = null;
+      try {
+        tmpURI = new URI(file);
+      } catch (URISyntaxException e) {
+        throw new IllegalArgumentException(e);
+      }
+      Path tmp = new Path(tmpURI);
+      Path newPath = copyRemoteFiles(fileDir, tmp, conf, replication);
+      DistributedCache.addFileToClassPath(new Path(newPath.toUri().getPath()),
+          conf);
+    }
+  }
+
+  /**
+   * Takes a path string for a file and verifies that it exists. It defaults
+   * to file:/// if the specified file does not have a scheme, and it returns
+   * the path's URI qualified accordingly. So an input of /home/user/file1
+   * would return file:///home/user/file1.
+   * 
+   * @param file
+   * @param conf
+   * @return
+   */
+  private String validateFilePath(String file, Configuration conf)
+      throws IOException {
+    if (file == null) {
+      return null;
+    }
+    if (file.isEmpty()) {
+      throw new IllegalArgumentException("File name can't be empty string");
+    }
+    String finalPath;
+    URI pathURI;
+    try {
+      pathURI = new URI(file);
+    } catch (URISyntaxException e) {
+      throw new IllegalArgumentException(e);
+    }
+    Path path = new Path(pathURI);
+    FileSystem localFs = FileSystem.getLocal(conf);
+    if (pathURI.getScheme() == null) {
+      // default to the local file system
+      // check if the file exists or not first
+      if (!localFs.exists(path)) {
+        throw new FileNotFoundException("File " + file + " does not exist.");
+      }
+      finalPath =
+          path.makeQualified(localFs.getUri(), localFs.getWorkingDirectory())
+              .toString();
+    } else {
+      // check if the file exists in this file system
+      // we need to recreate this filesystem object to copy
+      // these files to the file system ResourceManager is running
+      // on.
+      FileSystem fs = path.getFileSystem(conf);
+      if (!fs.exists(path)) {
+        throw new FileNotFoundException("File " + file + " does not exist.");
+      }
+      finalPath =
+          path.makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString();
+    }
+    return finalPath;
+  }
+}
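
The scheme-defaulting behavior that validateFilePath documents above is easy to
see against the FileSystem API directly. The following is a minimal
illustrative sketch, not part of the patch; the path is made up.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class QualifyDemo {
      public static void main(String[] args) throws IOException {
        FileSystem localFs = FileSystem.getLocal(new Configuration());
        Path p = new Path("/home/user/file1");        // no scheme given
        // makeQualified attaches the local file system's scheme and
        // authority, which is what validateFilePath does for schemeless input
        System.out.println(p.makeQualified(localFs.getUri(),
            localFs.getWorkingDirectory()));          // file:/home/user/file1
      }
    }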

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba4d888d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
index 75357f7..30a87c7 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
@@ -86,297 +86,6 @@ class JobSubmitter {
     this.submitClient = submitClient;
     this.jtFs = submitFs;
   }
-  /*
-   * see if two file systems are the same or not.
-   */
-  private boolean compareFs(FileSystem srcFs, FileSystem destFs) {
-    URI srcUri = srcFs.getUri();
-    URI dstUri = destFs.getUri();
-    if (srcUri.getScheme() == null) {
-      return false;
-    }
-    if (!srcUri.getScheme().equals(dstUri.getScheme())) {
-      return false;
-    }
-    String srcHost = srcUri.getHost();    
-    String dstHost = dstUri.getHost();
-    if ((srcHost != null) && (dstHost != null)) {
-      try {
-        srcHost = InetAddress.getByName(srcHost).getCanonicalHostName();
-        dstHost = InetAddress.getByName(dstHost).getCanonicalHostName();
-      } catch(UnknownHostException ue) {
-        return false;
-      }
-      if (!srcHost.equals(dstHost)) {
-        return false;
-      }
-    } else if (srcHost == null && dstHost != null) {
-      return false;
-    } else if (srcHost != null && dstHost == null) {
-      return false;
-    }
-    //check for ports
-    if (srcUri.getPort() != dstUri.getPort()) {
-      return false;
-    }
-    return true;
-  }
-
-  // copies a file to the jobtracker filesystem and returns the path where it
-  // was copied to
-  private Path copyRemoteFiles(Path parentDir,
-      Path originalPath, Configuration conf, short replication) 
-      throws IOException {
-    //check if we do not need to copy the files
-    // is jt using the same file system.
-    // just checking for uri strings... doing no dns lookups 
-    // to see if the filesystems are the same. This is not optimal.
-    // but avoids name resolution.
-    
-    FileSystem remoteFs = null;
-    remoteFs = originalPath.getFileSystem(conf);
-    if (compareFs(remoteFs, jtFs)) {
-      return originalPath;
-    }
-    // this might have name collisions. copy will throw an exception
-    //parse the original path to create new path
-    Path newPath = new Path(parentDir, originalPath.getName());
-    FileUtil.copy(remoteFs, originalPath, jtFs, newPath, false, conf);
-    jtFs.setReplication(newPath, replication);
-    return newPath;
-  }
-
-  // configures -files, -libjars and -archives.
-  private void copyAndConfigureFiles(Job job, Path submitJobDir,
-      short replication) throws IOException {
-    Configuration conf = job.getConfiguration();
-    if (!(conf.getBoolean(Job.USED_GENERIC_PARSER, false))) {
-      LOG.warn("Hadoop command-line option parsing not performed. " +
-               "Implement the Tool interface and execute your application " +
-               "with ToolRunner to remedy this.");
-    }
-
-    // get all the command line arguments passed in by the user conf
-    String files = conf.get("tmpfiles");
-    String libjars = conf.get("tmpjars");
-    String archives = conf.get("tmparchives");
-    String jobJar = job.getJar();
-
-    //
-    // Figure out what fs the JobTracker is using.  Copy the
-    // job to it, under a temporary name.  This allows DFS to work,
-    // and under the local fs also provides UNIX-like object loading 
-    // semantics.  (that is, if the job file is deleted right after
-    // submission, we can still run the submission to completion)
-    //
-
-    // Create a number of filenames in the JobTracker's fs namespace
-    LOG.debug("default FileSystem: " + jtFs.getUri());
-    if (jtFs.exists(submitJobDir)) {
-      throw new IOException("Not submitting job. Job directory " + submitJobDir
-          +" already exists!! This is unexpected.Please check what's there in" +
-          " that directory");
-    }
-    submitJobDir = jtFs.makeQualified(submitJobDir);
-    submitJobDir = new Path(submitJobDir.toUri().getPath());
-    FsPermission mapredSysPerms = new FsPermission(JobSubmissionFiles.JOB_DIR_PERMISSION);
-    FileSystem.mkdirs(jtFs, submitJobDir, mapredSysPerms);
-    Path filesDir = JobSubmissionFiles.getJobDistCacheFiles(submitJobDir);
-    Path archivesDir = JobSubmissionFiles.getJobDistCacheArchives(submitJobDir);
-    Path libjarsDir = JobSubmissionFiles.getJobDistCacheLibjars(submitJobDir);
-    // add all the command line files/ jars and archive
-    // first copy them to jobtrackers filesystem 
-      
-    if (files != null) {
-      FileSystem.mkdirs(jtFs, filesDir, mapredSysPerms);
-      String[] fileArr = files.split(",");
-      for (String tmpFile: fileArr) {
-        URI tmpURI = null;
-        try {
-          tmpURI = new URI(tmpFile);
-        } catch (URISyntaxException e) {
-          throw new IllegalArgumentException(e);
-        }
-        Path tmp = new Path(tmpURI);
-        Path newPath = copyRemoteFiles(filesDir, tmp, conf, replication);
-        try {
-          URI pathURI = getPathURI(newPath, tmpURI.getFragment());
-          DistributedCache.addCacheFile(pathURI, conf);
-        } catch(URISyntaxException ue) {
-          //should not throw a uri exception 
-          throw new IOException("Failed to create uri for " + tmpFile, ue);
-        }
-      }
-    }
-      
-    if (libjars != null) {
-      FileSystem.mkdirs(jtFs, libjarsDir, mapredSysPerms);
-      String[] libjarsArr = libjars.split(",");
-      for (String tmpjars: libjarsArr) {
-        Path tmp = new Path(tmpjars);
-        Path newPath = copyRemoteFiles(libjarsDir, tmp, conf, replication);
-        DistributedCache.addFileToClassPath(
-            new Path(newPath.toUri().getPath()), conf);
-      }
-    }
-      
-    if (archives != null) {
-      FileSystem.mkdirs(jtFs, archivesDir, mapredSysPerms); 
-      String[] archivesArr = archives.split(",");
-      for (String tmpArchives: archivesArr) {
-        URI tmpURI;
-        try {
-          tmpURI = new URI(tmpArchives);
-        } catch (URISyntaxException e) {
-          throw new IllegalArgumentException(e);
-        }
-        Path tmp = new Path(tmpURI);
-        Path newPath = copyRemoteFiles(archivesDir, tmp, conf,
-          replication);
-        try {
-          URI pathURI = getPathURI(newPath, tmpURI.getFragment());
-          DistributedCache.addCacheArchive(pathURI, conf);
-        } catch(URISyntaxException ue) {
-          //should not throw an uri excpetion
-          throw new IOException("Failed to create uri for " + tmpArchives, ue);
-        }
-      }
-    }
-
-    if (jobJar != null) {   // copy jar to JobTracker's fs
-      // use jar name if job is not named. 
-      if ("".equals(job.getJobName())){
-        job.setJobName(new Path(jobJar).getName());
-      }
-      Path jobJarPath = new Path(jobJar);
-      URI jobJarURI = jobJarPath.toUri();
-      // If the job jar is already in a global fs,
-      // we don't need to copy it from local fs
-      if (     jobJarURI.getScheme() == null
-            || jobJarURI.getScheme().equals("file")) {
-        copyJar(jobJarPath, JobSubmissionFiles.getJobJar(submitJobDir), 
-            replication);
-        job.setJar(JobSubmissionFiles.getJobJar(submitJobDir).toString());
-      }
-    } else {
-      LOG.warn("No job jar file set.  User classes may not be found. "+
-      "See Job or Job#setJar(String).");
-    }
-    
-    addLog4jToDistributedCache(job, submitJobDir);
-    
-    //  set the timestamps of the archives and files
-    //  set the public/private visibility of the archives and files
-    ClientDistributedCacheManager.determineTimestampsAndCacheVisibilities(conf);
-    // get DelegationToken for cached file
-    ClientDistributedCacheManager.getDelegationTokens(conf, job
-        .getCredentials());
-  }
-  
-  // copy user specified log4j.property file in local 
-  // to HDFS with putting on distributed cache and adding its parent directory 
-  // to classpath.
-  @SuppressWarnings("deprecation")
-  private void copyLog4jPropertyFile(Job job, Path submitJobDir,
-      short replication) throws IOException {
-    Configuration conf = job.getConfiguration();
-
-    String file = validateFilePath(
-        conf.get(MRJobConfig.MAPREDUCE_JOB_LOG4J_PROPERTIES_FILE), conf);
-    LOG.debug("default FileSystem: " + jtFs.getUri());
-    FsPermission mapredSysPerms = 
-      new FsPermission(JobSubmissionFiles.JOB_DIR_PERMISSION);
-    if (!jtFs.exists(submitJobDir)) {
-      throw new IOException("Cannot find job submission directory! " 
-          + "It should just be created, so something wrong here.");
-    }
-    
-    Path fileDir = JobSubmissionFiles.getJobLog4jFile(submitJobDir);
-
-    // first copy local log4j.properties file to HDFS under submitJobDir
-    if (file != null) {
-      FileSystem.mkdirs(jtFs, fileDir, mapredSysPerms);
-      URI tmpURI = null;
-      try {
-        tmpURI = new URI(file);
-      } catch (URISyntaxException e) {
-        throw new IllegalArgumentException(e);
-      }
-      Path tmp = new Path(tmpURI);
-      Path newPath = copyRemoteFiles(fileDir, tmp, conf, replication);
-      DistributedCache.addFileToClassPath(new Path(newPath.toUri().getPath()), conf);
-    }
-  }
-  
-  /**
-   * takes input as a path string for file and verifies if it exist. 
-   * It defaults for file:/// if the files specified do not have a scheme.
-   * it returns the paths uri converted defaulting to file:///.
-   * So an input of  /home/user/file1 would return file:///home/user/file1
-   * @param file
-   * @param conf
-   * @return
-   */
-  private String validateFilePath(String file, Configuration conf) 
-      throws IOException  {
-    if (file == null) {
-      return null;
-    }
-    if (file.isEmpty()) {
-      throw new IllegalArgumentException("File name can't be empty string");
-    }
-    String finalPath;
-    URI pathURI;
-    try {
-      pathURI = new URI(file);
-    } catch (URISyntaxException e) {
-      throw new IllegalArgumentException(e);
-    }
-    Path path = new Path(pathURI);
-    FileSystem localFs = FileSystem.getLocal(conf);
-    if (pathURI.getScheme() == null) {
-      //default to the local file system
-      //check if the file exists or not first
-      if (!localFs.exists(path)) {
-        throw new FileNotFoundException("File " + file + " does not exist.");
-      }
-      finalPath = path.makeQualified(localFs.getUri(),
-          localFs.getWorkingDirectory()).toString();
-    }
-    else {
-      // check if the file exists in this file system
-      // we need to recreate this filesystem object to copy
-      // these files to the file system ResourceManager is running
-      // on.
-      FileSystem fs = path.getFileSystem(conf);
-      if (!fs.exists(path)) {
-        throw new FileNotFoundException("File " + file + " does not exist.");
-      }
-      finalPath = path.makeQualified(fs.getUri(),
-          fs.getWorkingDirectory()).toString();
-    }
-    return finalPath;
-  }
-  
-  private URI getPathURI(Path destPath, String fragment) 
-      throws URISyntaxException {
-    URI pathURI = destPath.toUri();
-    if (pathURI.getFragment() == null) {
-      if (fragment == null) {
-        pathURI = new URI(pathURI.toString() + "#" + destPath.getName());
-      } else {
-        pathURI = new URI(pathURI.toString() + "#" + fragment);
-      }
-    }
-    return pathURI;
-  }
-  
-  private void copyJar(Path originalJarPath, Path submitJarFile,
-      short replication) throws IOException {
-    jtFs.copyFromLocalFile(originalJarPath, submitJarFile);
-    jtFs.setReplication(submitJarFile, replication);
-    jtFs.setPermission(submitJarFile, new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION));
-  }
   
   /**
    * configure the jobconf of the user with the command line options of 
@@ -386,9 +95,8 @@ class JobSubmitter {
    */
   private void copyAndConfigureFiles(Job job, Path jobSubmitDir) 
   throws IOException {
-    Configuration conf = job.getConfiguration();
-    short replication = (short)conf.getInt(Job.SUBMIT_REPLICATION, 10);
-    copyAndConfigureFiles(job, jobSubmitDir, replication);
+    JobResourceUploader rUploader = new JobResourceUploader(jtFs);
+    rUploader.uploadFiles(job, jobSubmitDir);
 
     // Get the working directory. If not set, sets it to filesystem working dir
     // This code has been added so that working directory reset before running
@@ -396,8 +104,8 @@ class JobSubmitter {
     // might use the public API JobConf#setWorkingDirectory to reset the working
     // directory.
     job.getWorkingDirectory();
-
   }
+
   /**
    * Internal method for submitting jobs to the system.
    * 
@@ -484,10 +192,7 @@ class JobSubmitter {
       }
 
       copyAndConfigureFiles(job, submitJobDir);
-      
-      
 
-      
       Path submitJobFile = JobSubmissionFiles.getJobConfPath(submitJobDir);
       
       // Create the splits for the job
@@ -766,15 +471,4 @@ class JobSubmitter {
       DistributedCache.addCacheArchive(uri, conf);
     }
   }
-  
-  private void addLog4jToDistributedCache(Job job,
-      Path jobSubmitDir) throws IOException {
-    Configuration conf = job.getConfiguration();
-    String log4jPropertyFile =
-        conf.get(MRJobConfig.MAPREDUCE_JOB_LOG4J_PROPERTIES_FILE, "");
-    if (!log4jPropertyFile.isEmpty()) {
-      short replication = (short)conf.getInt(Job.SUBMIT_REPLICATION, 10);
-      copyLog4jPropertyFile(job, jobSubmitDir, replication);
-    }
-  }
 }
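
For reference, the fragment handling in getPathURI (moved to
JobResourceUploader by this patch, not changed) is what gives a distributed
cache entry the link name a task sees. A hedged sketch follows; the HDFS
address and jar path are hypothetical.

    import java.net.URI;
    import org.apache.hadoop.fs.Path;

    public class FragmentDemo {
      public static void main(String[] args) throws Exception {
        URI u = new Path("hdfs://nn:8020/staging/libs/guava.jar").toUri();
        // no fragment on the URI, so default to the file name, as getPathURI does
        URI cacheUri = new URI(u.toString() + "#" + "guava.jar");
        // prints hdfs://nn:8020/staging/libs/guava.jar#guava.jar; passing this
        // to DistributedCache.addCacheFile registers it under that link name
        System.out.println(cacheUri);
      }
    }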


[31/50] [abbrv] hadoop git commit: YARN-2786. Created a yarn cluster CLI and seeded with one command for listing node-labels collection. Contributed by Wangda Tan.

Posted by ji...@apache.org.
YARN-2786. Created a yarn cluster CLI and seeded with one command for listing node-labels collection. Contributed by Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a638ed6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a638ed6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a638ed6

Branch: refs/heads/HDFS-7285
Commit: 7a638ed67a225ea45b258db6926f51bb354c1564
Parents: 22b1f53
Author: Vinod Kumar Vavilapalli <vi...@apache.org>
Authored: Thu Mar 5 10:54:34 2015 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:25 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |   3 +
 hadoop-yarn-project/hadoop-yarn/bin/yarn        |   6 +
 hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd    |   6 +
 .../hadoop/yarn/client/cli/ClusterCLI.java      | 157 ++++++++++++++++++
 .../hadoop/yarn/client/cli/TestClusterCLI.java  | 158 +++++++++++++++++++
 5 files changed, 330 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a638ed6/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 5f61462..dcf328f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -97,6 +97,9 @@ Release 2.7.0 - UNRELEASED
     YARN-3249. Add a 'kill application' button to Resource Manager's Web UI.
     (Ryu Kobayashi via ozawa)
 
+    YARN-2786. Created a yarn cluster CLI and seeded with one command for listing
+    node-labels collection. (Wangda Tan via vinodkv)
+
   IMPROVEMENTS
 
     YARN-3005. [JDK7] Use switch statement for String instead of if-else

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a638ed6/hadoop-yarn-project/hadoop-yarn/bin/yarn
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index f1a06a6..e6af4ae 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -25,6 +25,7 @@ function hadoop_usage
   echo "  applicationattempt                    prints applicationattempt(s) report"
   echo "  classpath                             prints the class path needed to get the"
   echo "                                        Hadoop jar and the required libraries"
+  echo "  cluster                               prints cluster information"
   echo "  container                             prints container(s) report"
   echo "  daemonlog                             get/set the log level for each daemon"
   echo "  jar <jar>                             run a jar file"
@@ -83,6 +84,11 @@ case "${COMMAND}" in
   classpath)
     hadoop_do_classpath_subcommand "$@"
   ;;
+  cluster)
+    CLASS=org.apache.hadoop.yarn.client.cli.ClusterCLI
+    hadoop_debug "Append YARN_CLIENT_OPTS onto YARN_OPTS"
+    YARN_OPTS="${YARN_OPTS} ${YARN_CLIENT_OPTS}"
+  ;;
   daemonlog)
     CLASS=org.apache.hadoop.log.LogLevel
     hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a638ed6/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
index 3f68b16..c29ee53 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
@@ -192,6 +192,11 @@ goto :eof
   set yarn-command-arguments=%yarn-command% %yarn-command-arguments%
   goto :eof
 
+:cluster
+  set CLASS=org.apache.hadoop.yarn.client.cli.ClusterCLI
+  set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+  goto :eof
+
 :container
   set CLASS=org.apache.hadoop.yarn.client.cli.ApplicationCLI
   set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
@@ -312,6 +317,7 @@ goto :eof
   @echo   jar ^<jar^>          run a jar file
   @echo   application          prints application(s) report/kill application
   @echo   applicationattempt   prints applicationattempt(s) report
+  @echo   cluster              prints cluster information
   @echo   container            prints container(s) report
   @echo   node                 prints node report(s)
   @echo   queue                prints queue information

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a638ed6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ClusterCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ClusterCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ClusterCLI.java
new file mode 100644
index 0000000..3924803
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ClusterCLI.java
@@ -0,0 +1,157 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.client.cli;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
+import java.io.UnsupportedEncodingException;
+import java.nio.charset.Charset;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.MissingArgumentException;
+import org.apache.commons.cli.Options;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Cluster CLI used to get overall information about the cluster.
+ */
+@Private
+public class ClusterCLI extends YarnCLI {
+  private static final String TITLE = "yarn cluster";
+  public static final String LIST_LABELS_CMD = "list-node-labels";
+  public static final String DIRECTLY_ACCESS_NODE_LABEL_STORE =
+      "directly-access-node-label-store";
+  public static final String CMD = "cluster";
+  private boolean accessLocal = false;
+  static CommonNodeLabelsManager localNodeLabelsManager = null;
+
+  public static void main(String[] args) throws Exception {
+    ClusterCLI cli = new ClusterCLI();
+    cli.setSysOutPrintStream(System.out);
+    cli.setSysErrPrintStream(System.err);
+    int res = ToolRunner.run(cli, args);
+    cli.stop();
+    System.exit(res);
+  }
+
+  @Override
+  public int run(String[] args) throws Exception {    
+    Options opts = new Options();
+
+    opts.addOption("lnl", LIST_LABELS_CMD, false,
+        "List cluster node-label collection");
+    opts.addOption("h", HELP_CMD, false, "Displays help for all commands.");
+    opts.addOption("dnl", DIRECTLY_ACCESS_NODE_LABEL_STORE, false,
+        "Directly access node label store, "
+            + "with this option, all node label related operations"
+            + " will NOT connect RM. Instead, they will"
+            + " access/modify stored node labels directly."
+            + " By default, it is false (access via RM)."
+            + " AND PLEASE NOTE: if you configured "
+            + YarnConfiguration.FS_NODE_LABELS_STORE_ROOT_DIR
+            + " to a local directory"
+            + " (instead of NFS or HDFS), this option will only work"
+            + " when the command run on the machine where RM is running."
+            + " Also, this option is UNSTABLE, could be removed in future"
+            + " releases.");
+
+    int exitCode = -1;
+    CommandLine parsedCli = null;
+    try {
+      parsedCli = new GnuParser().parse(opts, args);
+    } catch (MissingArgumentException ex) {
+      sysout.println("Missing argument for options");
+      printUsage(opts);
+      return exitCode;
+    }
+
+    if (parsedCli.hasOption(DIRECTLY_ACCESS_NODE_LABEL_STORE)) {
+      accessLocal = true;
+    }
+
+    if (parsedCli.hasOption(LIST_LABELS_CMD)) {
+      printClusterNodeLabels();
+    } else if (parsedCli.hasOption(HELP_CMD)) {
+      printUsage(opts);
+      return 0;
+    } else {
+      syserr.println("Invalid Command Usage : ");
+      printUsage(opts);
+    }
+    return 0;
+  }
+
+  private List<String> sortStrSet(Set<String> labels) {
+    List<String> list = new ArrayList<String>();
+    list.addAll(labels);
+    Collections.sort(list);
+    return list;
+  }
+
+  void printClusterNodeLabels() throws YarnException, IOException {
+    Set<String> nodeLabels = null;
+    if (accessLocal) {
+      nodeLabels =
+          getNodeLabelManagerInstance(getConf()).getClusterNodeLabels();
+    } else {
+      nodeLabels = client.getClusterNodeLabels();
+    }
+    sysout.println(String.format("Node Labels: %s",
+        StringUtils.join(sortStrSet(nodeLabels).iterator(), ",")));
+  }
+
+  @VisibleForTesting
+  static synchronized CommonNodeLabelsManager
+      getNodeLabelManagerInstance(Configuration conf) {
+    if (localNodeLabelsManager == null) {
+      localNodeLabelsManager = new CommonNodeLabelsManager();
+      localNodeLabelsManager.init(conf);
+      localNodeLabelsManager.start();
+    }
+    return localNodeLabelsManager;
+  }
+
+  @VisibleForTesting
+  void printUsage(Options opts) throws UnsupportedEncodingException {
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintWriter pw =
+        new PrintWriter(new OutputStreamWriter(baos, Charset.forName("UTF-8")));
+    new HelpFormatter().printHelp(pw, HelpFormatter.DEFAULT_WIDTH, TITLE, null,
+        opts, HelpFormatter.DEFAULT_LEFT_PAD, HelpFormatter.DEFAULT_DESC_PAD,
+        null);
+    pw.close();
+    sysout.println(baos.toString("UTF-8"));
+  }
+}
\ No newline at end of file
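
The new CLI can also be driven programmatically, which is how the tests below
exercise it. A hedged sketch of that setup (it assumes a reachable
ResourceManager; the client wiring mirrors the tests, nothing here is
prescribed by the patch beyond the methods it shows):

    import org.apache.hadoop.yarn.client.api.YarnClient;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class ClusterCliDemo {
      public static void main(String[] args) throws Exception {
        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(new YarnConfiguration());
        yarnClient.start();

        ClusterCLI cli = new ClusterCLI();
        cli.setClient(yarnClient);             // inject the client, as the tests do
        cli.setSysOutPrintStream(System.out);
        cli.setSysErrPrintStream(System.err);
        // equivalent to "yarn cluster -lnl" through the shell wrapper above
        int rc = cli.run(new String[] { "cluster", "-list-node-labels" });
        cli.stop();
        System.exit(rc);
      }
    }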

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a638ed6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestClusterCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestClusterCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestClusterCLI.java
new file mode 100644
index 0000000..f9ccf87
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestClusterCLI.java
@@ -0,0 +1,158 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.client.cli;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import java.io.PrintWriter;
+import java.util.HashSet;
+
+import org.apache.hadoop.yarn.client.api.YarnClient;
+import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.google.common.collect.ImmutableSet;
+
+public class TestClusterCLI {
+  ByteArrayOutputStream sysOutStream;
+  private PrintStream sysOut;
+  ByteArrayOutputStream sysErrStream;
+  private PrintStream sysErr;
+
+  @Before
+  public void setup() {
+    sysOutStream = new ByteArrayOutputStream();
+    sysOut = spy(new PrintStream(sysOutStream));
+    sysErrStream = new ByteArrayOutputStream();
+    sysErr = spy(new PrintStream(sysErrStream));
+    System.setOut(sysOut);
+  }
+  
+  @Test
+  public void testGetClusterNodeLabels() throws Exception {
+    YarnClient client = mock(YarnClient.class);
+    when(client.getClusterNodeLabels()).thenReturn(
+        ImmutableSet.of("label1", "label2"));
+    ClusterCLI cli = new ClusterCLI();
+    cli.setClient(client);
+    cli.setSysOutPrintStream(sysOut);
+    cli.setSysErrPrintStream(sysErr);
+    
+    int rc =
+        cli.run(new String[] { ClusterCLI.CMD, "-" + ClusterCLI.LIST_LABELS_CMD });
+    assertEquals(0, rc);
+    
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintWriter pw = new PrintWriter(baos);
+    pw.print("Node Labels: label1,label2");
+    pw.close();
+    verify(sysOut).println(baos.toString("UTF-8"));
+  }
+  
+  @Test
+  public void testGetClusterNodeLabelsWithLocalAccess() throws Exception {
+    YarnClient client = mock(YarnClient.class);
+    when(client.getClusterNodeLabels()).thenReturn(
+        ImmutableSet.of("remote1", "remote2"));
+    ClusterCLI cli = new ClusterCLI();
+    cli.setClient(client);
+    cli.setSysOutPrintStream(sysOut);
+    cli.setSysErrPrintStream(sysErr);
+    ClusterCLI.localNodeLabelsManager = mock(CommonNodeLabelsManager.class);
+    when(ClusterCLI.localNodeLabelsManager.getClusterNodeLabels())
+        .thenReturn(ImmutableSet.of("local1", "local2"));
+
+    int rc =
+        cli.run(new String[] { ClusterCLI.CMD,
+            "-" + ClusterCLI.LIST_LABELS_CMD,
+            "-" + ClusterCLI.DIRECTLY_ACCESS_NODE_LABEL_STORE });
+    assertEquals(0, rc);
+
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintWriter pw = new PrintWriter(baos);
+    // it should return local* instead of remote*
+    pw.print("Node Labels: local1,local2");
+    pw.close();
+    verify(sysOut).println(baos.toString("UTF-8"));
+  }
+  
+  @Test
+  public void testGetEmptyClusterNodeLabels() throws Exception {
+    YarnClient client = mock(YarnClient.class);
+    when(client.getClusterNodeLabels()).thenReturn(new HashSet<String>());
+    ClusterCLI cli = new ClusterCLI();
+    cli.setClient(client);
+    cli.setSysOutPrintStream(sysOut);
+    cli.setSysErrPrintStream(sysErr);
+
+    int rc =
+        cli.run(new String[] { ClusterCLI.CMD, "-" + ClusterCLI.LIST_LABELS_CMD });
+    assertEquals(0, rc);
+
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintWriter pw = new PrintWriter(baos);
+    pw.print("Node Labels: ");
+    pw.close();
+    verify(sysOut).println(baos.toString("UTF-8"));
+  }
+  
+  @Test
+  public void testHelp() throws Exception {
+    ClusterCLI cli = new ClusterCLI();
+    cli.setSysOutPrintStream(sysOut);
+    cli.setSysErrPrintStream(sysErr);
+
+    int rc =
+        cli.run(new String[] { "cluster", "--help" });
+    assertEquals(0, rc);
+    
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintWriter pw = new PrintWriter(baos);
+    pw.println("usage: yarn cluster");
+    pw.println(" -dnl,--directly-access-node-label-store   Directly access node label");
+    pw.println("                                           store, with this option, all");
+    pw.println("                                           node label related operations");
+    pw.println("                                           will NOT connect RM. Instead,");
+    pw.println("                                           they will access/modify stored");
+    pw.println("                                           node labels directly. By");
+    pw.println("                                           default, it is false (access");
+    pw.println("                                           via RM). AND PLEASE NOTE: if");
+    pw.println("                                           you configured");
+    pw.println("                                           yarn.node-labels.fs-store.root-");
+    pw.println("                                           dir to a local directory");
+    pw.println("                                           (instead of NFS or HDFS), this");
+    pw.println("                                           option will only work when the");
+    pw.println("                                           command run on the machine");
+    pw.println("                                           where RM is running. Also, this");
+    pw.println("                                           option is UNSTABLE, could be");
+    pw.println("                                           removed in future releases.");
+    pw.println(" -h,--help                                 Displays help for all commands.");
+    pw.println(" -lnl,--list-node-labels                   List cluster node-label");
+    pw.println("                                           collection");
+    pw.close();
+    verify(sysOut).println(baos.toString("UTF-8"));
+  }
+}
\ No newline at end of file


[21/50] [abbrv] hadoop git commit: HDFS-7434. DatanodeID hashCode should not be mutable. Contributed by Daryn Sharp.

Posted by ji...@apache.org.
HDFS-7434. DatanodeID hashCode should not be mutable. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e93eee9f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e93eee9f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e93eee9f

Branch: refs/heads/HDFS-7285
Commit: e93eee9f69436174e83f194fc8d82c8a581ad1f2
Parents: ba4d888
Author: Kihwal Lee <ki...@apache.org>
Authored: Wed Mar 4 17:21:51 2015 -0600
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:24 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  2 +
 .../apache/hadoop/hdfs/protocol/DatanodeID.java | 48 ++++++++------------
 .../server/protocol/DatanodeRegistration.java   | 10 ++++
 .../blockmanagement/TestBlockManager.java       |  7 ---
 .../TestComputeInvalidateWork.java              | 16 +++++--
 .../TestDatanodeProtocolRetryPolicy.java        |  3 +-
 6 files changed, 43 insertions(+), 43 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e93eee9f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3c6d447..2be1a4c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1091,6 +1091,8 @@ Release 2.7.0 - UNRELEASED
     HDFS-7879. hdfs.dll does not export functions of the public libhdfs API.
     (Chris Nauroth via wheat9)
 
+    HDFS-7434. DatanodeID hashCode should not be mutable. (daryn via kihwal)
+
     BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
       HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e93eee9f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
index 779e3b9..f91696f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
@@ -47,19 +47,23 @@ public class DatanodeID implements Comparable<DatanodeID> {
   private int infoSecurePort; // info server port
   private int ipcPort;       // IPC server port
   private String xferAddr;
-  private int hashCode = -1;
 
   /**
    * UUID identifying a given datanode. For upgraded Datanodes this is the
    * same as the StorageID that was previously used by this Datanode. 
    * For newly formatted Datanodes it is a UUID.
    */
-  private String datanodeUuid = null;
+  private final String datanodeUuid;
 
   public DatanodeID(DatanodeID from) {
+    this(from.getDatanodeUuid(), from);
+  }
+
+  @VisibleForTesting
+  public DatanodeID(String datanodeUuid, DatanodeID from) {
     this(from.getIpAddr(),
         from.getHostName(),
-        from.getDatanodeUuid(),
+        datanodeUuid,
         from.getXferPort(),
         from.getInfoPort(),
         from.getInfoSecurePort(),
@@ -81,19 +85,24 @@ public class DatanodeID implements Comparable<DatanodeID> {
    */
   public DatanodeID(String ipAddr, String hostName, String datanodeUuid,
       int xferPort, int infoPort, int infoSecurePort, int ipcPort) {
-    this.ipAddr = ipAddr;
+    setIpAndXferPort(ipAddr, xferPort);
     this.hostName = hostName;
     this.datanodeUuid = checkDatanodeUuid(datanodeUuid);
-    this.xferPort = xferPort;
     this.infoPort = infoPort;
     this.infoSecurePort = infoSecurePort;
     this.ipcPort = ipcPort;
-    updateXferAddrAndInvalidateHashCode();
   }
   
   public void setIpAddr(String ipAddr) {
+    //updated during registration, preserve former xferPort
+    setIpAndXferPort(ipAddr, xferPort);
+  }
+
+  private void setIpAndXferPort(String ipAddr, int xferPort) {
+    // build xferAddr string to reduce cost of frequent use
     this.ipAddr = ipAddr;
-    updateXferAddrAndInvalidateHashCode();
+    this.xferPort = xferPort;
+    this.xferAddr = ipAddr + ":" + xferPort;
   }
 
   public void setPeerHostName(String peerHostName) {
@@ -107,12 +116,6 @@ public class DatanodeID implements Comparable<DatanodeID> {
     return datanodeUuid;
   }
 
-  @VisibleForTesting
-  public void setDatanodeUuidForTesting(String datanodeUuid) {
-    this.datanodeUuid = datanodeUuid;
-    updateXferAddrAndInvalidateHashCode();
-  }
-
   private String checkDatanodeUuid(String uuid) {
     if (uuid == null || uuid.isEmpty()) {
       return null;
@@ -242,11 +245,7 @@ public class DatanodeID implements Comparable<DatanodeID> {
   
   @Override
   public int hashCode() {
-    if (hashCode == -1) {
-      int newHashCode = xferAddr.hashCode() ^ datanodeUuid.hashCode();
-      hashCode = newHashCode & Integer.MAX_VALUE;
-    }
-    return hashCode;
+    return datanodeUuid.hashCode();
   }
   
   @Override
@@ -259,14 +258,12 @@ public class DatanodeID implements Comparable<DatanodeID> {
    * Note that this does not update storageID.
    */
   public void updateRegInfo(DatanodeID nodeReg) {
-    ipAddr = nodeReg.getIpAddr();
+    setIpAndXferPort(nodeReg.getIpAddr(), nodeReg.getXferPort());
     hostName = nodeReg.getHostName();
     peerHostName = nodeReg.getPeerHostName();
-    xferPort = nodeReg.getXferPort();
     infoPort = nodeReg.getInfoPort();
     infoSecurePort = nodeReg.getInfoSecurePort();
     ipcPort = nodeReg.getIpcPort();
-    updateXferAddrAndInvalidateHashCode();
   }
     
   /**
@@ -279,13 +276,4 @@ public class DatanodeID implements Comparable<DatanodeID> {
   public int compareTo(DatanodeID that) {
     return getXferAddr().compareTo(that.getXferAddr());
   }
-
-  // NOTE: mutable hash codes are dangerous, however this class chooses to
-  // use them.  this method must be called when a value mutates that is used
-  // to compute the hash, equality, or comparison of instances.
-  private void updateXferAddrAndInvalidateHashCode() {
-    xferAddr = ipAddr + ":" + xferPort;
-    // can't compute new hash yet because uuid might still null...
-    hashCode = -1;
-  }
 }
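
The hazard this change removes is easy to reproduce outside Hadoop: once a
field that feeds hashCode() mutates, hash-based collections probe the wrong
bucket and can no longer find the entry. A self-contained demonstration
(illustrative only, not Hadoop code):

    import java.util.HashSet;
    import java.util.Objects;
    import java.util.Set;

    public class MutableHashDemo {
      static class Key {
        String id;
        Key(String id) { this.id = id; }
        @Override public int hashCode() { return Objects.hashCode(id); }
        @Override public boolean equals(Object o) {
          return o instanceof Key && Objects.equals(id, ((Key) o).id);
        }
      }

      public static void main(String[] args) {
        Set<Key> live = new HashSet<>();
        Key k = new Key("uuid-1");
        live.add(k);
        k.id = "uuid-2";                       // mutate a field behind hashCode()
        System.out.println(live.contains(k));  // false: probes the wrong bucket
      }
    }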

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e93eee9f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
index aaa18c6..9db2fca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
@@ -25,6 +25,8 @@ import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /** 
  * DatanodeRegistration class contains all information the name-node needs
  * to identify and verify a data-node when it contacts the name-node.
@@ -39,6 +41,14 @@ public class DatanodeRegistration extends DatanodeID
   private ExportedBlockKeys exportedKeys;
   private final String softwareVersion;
 
+  @VisibleForTesting
+  public DatanodeRegistration(String uuid, DatanodeRegistration dnr) {
+    this(new DatanodeID(uuid, dnr),
+         dnr.getStorageInfo(),
+         dnr.getExportedKeys(),
+         dnr.getSoftwareVersion());
+  }
+
   public DatanodeRegistration(DatanodeID dn, StorageInfo info,
       ExportedBlockKeys keys, String softwareVersion) {
     super(dn);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e93eee9f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 236a583..97c9801 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -538,10 +538,6 @@ public class TestBlockManager {
   public void testSafeModeIBR() throws Exception {
     DatanodeDescriptor node = spy(nodes.get(0));
     DatanodeStorageInfo ds = node.getStorageInfos()[0];
-
-    // TODO: Needs to be fixed. DatanodeUuid is not storageID.
-    node.setDatanodeUuidForTesting(ds.getStorageID());
-
     node.isAlive = true;
 
     DatanodeRegistration nodeReg =
@@ -587,9 +583,6 @@ public class TestBlockManager {
     DatanodeDescriptor node = spy(nodes.get(0));
     DatanodeStorageInfo ds = node.getStorageInfos()[0];
 
-    // TODO: Needs to be fixed. DatanodeUuid is not storageID.
-    node.setDatanodeUuidForTesting(ds.getStorageID());
-
     node.isAlive = true;
 
     DatanodeRegistration nodeReg =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e93eee9f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
index fecca4e..5b08f53 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.util.VersionInfo;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.internal.util.reflection.Whitebox;
@@ -119,10 +118,17 @@ public class TestComputeInvalidateWork {
   public void testDatanodeReformat() throws Exception {
     namesystem.writeLock();
     try {
+      // Change the datanode UUID to emulate a reformat
+      String poolId = cluster.getNamesystem().getBlockPoolId();
+      DatanodeRegistration dnr = cluster.getDataNode(nodes[0].getIpcPort())
+                                        .getDNRegistrationForBP(poolId);
+      dnr = new DatanodeRegistration(UUID.randomUUID().toString(), dnr);
+      cluster.stopDataNode(nodes[0].getXferAddr());
+
       Block block = new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP);
       bm.addToInvalidates(block, nodes[0]);
-      // Change the datanode UUID to emulate a reformation
-      nodes[0].setDatanodeUuidForTesting("fortesting");
+      bm.getDatanodeManager().registerDatanode(dnr);
+
       // Since UUID has changed, the invalidation work should be skipped
       assertEquals(0, bm.computeInvalidateWork(1));
       assertEquals(0, bm.getPendingDeletionBlocksCount());
@@ -158,8 +164,8 @@ public class TestComputeInvalidateWork {
     // Re-register each DN and see that it wipes the invalidation work
     for (DataNode dn : cluster.getDataNodes()) {
       DatanodeID did = dn.getDatanodeId();
-      did.setDatanodeUuidForTesting(UUID.randomUUID().toString());
-      DatanodeRegistration reg = new DatanodeRegistration(did,
+      DatanodeRegistration reg = new DatanodeRegistration(
+          new DatanodeID(UUID.randomUUID().toString(), did),
           new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE),
           new ExportedBlockKeys(),
           VersionInfo.getVersion());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e93eee9f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
index da858cd..ac7ebc0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
@@ -173,7 +173,8 @@ public class TestDatanodeProtocolRetryPolicy {
         } else {
           DatanodeRegistration dr =
               (DatanodeRegistration) invocation.getArguments()[0];
-          datanodeRegistration.setDatanodeUuidForTesting(dr.getDatanodeUuid());
+          datanodeRegistration =
+              new DatanodeRegistration(dr.getDatanodeUuid(), dr);
           LOG.info("mockito succeeded " + datanodeRegistration);
           return datanodeRegistration;
         }


[35/50] [abbrv] hadoop git commit: YARN-3275. CapacityScheduler: Preemption happening on non-preemptable queues. Contributed by Eric Payne

Posted by ji...@apache.org.
YARN-3275. CapacityScheduler: Preemption happening on non-preemptable queues. Contributed by Eric Payne


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dfc015f2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dfc015f2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dfc015f2

Branch: refs/heads/HDFS-7285
Commit: dfc015f295d92286ad570667556e837bd1d30134
Parents: fcae120
Author: Jason Lowe <jl...@apache.org>
Authored: Fri Mar 6 22:36:18 2015 +0000
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Mar 9 13:11:26 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 +++
 .../hadoop/yarn/util/resource/Resources.java    |  5 ++++
 .../ProportionalCapacityPreemptionPolicy.java   | 27 ++++++++++++++++----
 ...estProportionalCapacityPreemptionPolicy.java | 24 +++++++++++++++++
 4 files changed, 54 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dfc015f2/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index c2aa2ef..250fc1c 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -719,6 +719,9 @@ Release 2.7.0 - UNRELEASED
     YARN-3227. Timeline renew delegation token fails when RM user's TGT is expired
     (Zhijie Shen via xgong)
 
+    YARN-3275. CapacityScheduler: Preemption happening on non-preemptable
+    queues (Eric Payne via jlowe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dfc015f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
index a205bd1..bcb0421 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
@@ -260,4 +260,9 @@ public class Resources {
     return createResource(Math.min(lhs.getMemory(), rhs.getMemory()),
         Math.min(lhs.getVirtualCores(), rhs.getVirtualCores()));
   }
+  
+  public static Resource componentwiseMax(Resource lhs, Resource rhs) {
+    return createResource(Math.max(lhs.getMemory(), rhs.getMemory()),
+        Math.max(lhs.getVirtualCores(), rhs.getVirtualCores()));
+  }
 }
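
The new helper matters in offer() below: when a queue is already over its
absolute max capacity, (maxCapacity - idealAssigned) goes negative in one or
both dimensions, and feeding that straight into min() would let the queue
"accept" a negative amount. Clamping each dimension at zero first avoids that.
A sketch with made-up numbers (echoing the absMaxCap of 109 used in the test
below):

    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.util.resource.Resources;

    public class ClampDemo {
      public static void main(String[] args) {
        Resource max      = Resource.newInstance(109, 10);  // absolute max
        Resource assigned = Resource.newInstance(110, 10);  // already over it
        // subtract yields <memory:-1, vCores:0>; the clamp pins it at zero
        Resource headroom = Resources.componentwiseMax(
            Resources.subtract(max, assigned),
            Resource.newInstance(0, 0));
        System.out.println(headroom);  // <memory:0, vCores:0>
      }
    }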

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dfc015f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
index 738f527..87a2a00 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
@@ -527,6 +527,17 @@ public class ProportionalCapacityPreemptionPolicy implements SchedulingEditPolic
     List<RMContainer> skippedAMContainerlist = new ArrayList<RMContainer>();
 
     for (TempQueue qT : queues) {
+      if (qT.preemptionDisabled && qT.leafQueue != null) {
+        if (LOG.isDebugEnabled()) {
+          if (Resources.greaterThan(rc, clusterResource,
+              qT.toBePreempted, Resource.newInstance(0, 0))) {
+            LOG.debug("Tried to preempt the following "
+                      + "resources from non-preemptable queue: "
+                      + qT.queueName + " - Resources: " + qT.toBePreempted);
+          }
+        }
+        continue;
+      }
       // we act only if we are violating balance by more than
       // maxIgnoredOverCapacity
       if (Resources.greaterThan(rc, clusterResource, qT.current,
@@ -734,6 +745,7 @@ public class ProportionalCapacityPreemptionPolicy implements SchedulingEditPolic
       float absUsed = root.getAbsoluteUsedCapacity();
       float absCap = root.getAbsoluteCapacity();
       float absMaxCap = root.getAbsoluteMaximumCapacity();
+      boolean preemptionDisabled = root.getPreemptionDisabled();
 
       Resource current = Resources.multiply(clusterResources, absUsed);
       Resource guaranteed = Resources.multiply(clusterResources, absCap);
@@ -747,8 +759,8 @@ public class ProportionalCapacityPreemptionPolicy implements SchedulingEditPolic
         LeafQueue l = (LeafQueue) root;
         Resource pending = l.getTotalResourcePending();
         ret = new TempQueue(queueName, current, pending, guaranteed,
-            maxCapacity);
-        if (root.getPreemptionDisabled()) {
+            maxCapacity, preemptionDisabled);
+        if (preemptionDisabled) {
           ret.untouchableExtra = extra;
         } else {
           ret.preemptableExtra = extra;
@@ -757,7 +769,7 @@ public class ProportionalCapacityPreemptionPolicy implements SchedulingEditPolic
       } else {
         Resource pending = Resource.newInstance(0, 0);
         ret = new TempQueue(root.getQueueName(), current, pending, guaranteed,
-            maxCapacity);
+            maxCapacity, false);
         Resource childrensPreemptable = Resource.newInstance(0, 0);
         for (CSQueue c : root.getChildQueues()) {
           TempQueue subq = cloneQueues(c, clusterResources);
@@ -816,9 +828,10 @@ public class ProportionalCapacityPreemptionPolicy implements SchedulingEditPolic
 
     final ArrayList<TempQueue> children;
     LeafQueue leafQueue;
+    boolean preemptionDisabled;
 
     TempQueue(String queueName, Resource current, Resource pending,
-        Resource guaranteed, Resource maxCapacity) {
+        Resource guaranteed, Resource maxCapacity, boolean preemptionDisabled) {
       this.queueName = queueName;
       this.current = current;
       this.pending = pending;
@@ -831,6 +844,7 @@ public class ProportionalCapacityPreemptionPolicy implements SchedulingEditPolic
       this.children = new ArrayList<TempQueue>();
       this.untouchableExtra = Resource.newInstance(0, 0);
       this.preemptableExtra = Resource.newInstance(0, 0);
+      this.preemptionDisabled = preemptionDisabled;
     }
 
     public void setLeafQueue(LeafQueue l){
@@ -862,10 +876,13 @@ public class ProportionalCapacityPreemptionPolicy implements SchedulingEditPolic
     // the unused ones
     Resource offer(Resource avail, ResourceCalculator rc,
         Resource clusterResource) {
+      Resource absMaxCapIdealAssignedDelta = Resources.componentwiseMax(
+                      Resources.subtract(maxCapacity, idealAssigned),
+                      Resource.newInstance(0, 0));
       // remain = avail - min(avail, (max - assigned), (current + pending - assigned))
       Resource accepted = 
           Resources.min(rc, clusterResource, 
-              Resources.subtract(maxCapacity, idealAssigned),
+              absMaxCapIdealAssignedDelta,
           Resources.min(rc, clusterResource, avail, Resources.subtract(
               Resources.add(current, pending), idealAssigned)));
       Resource remain = Resources.subtract(avail, accepted);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dfc015f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
index 696b9bb..8f5237e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
@@ -532,6 +532,30 @@ public class TestProportionalCapacityPreemptionPolicy {
   }
 
   @Test
+  public void testPerQueueDisablePreemptionOverAbsMaxCapacity() {
+    int[][] qData = new int[][] {
+        //  /    A              D
+        //            B    C         E    F
+        {1000, 725, 360, 365, 275,  17, 258 },  // absCap
+        {1000,1000,1000,1000, 550, 109,1000 },  // absMaxCap
+        {1000, 741, 396, 345, 259, 110, 149 },  // used
+        {  40,  20,   0,  20,  20,  20,   0 },  // pending
+        {   0,   0,   0,   0,   0,   0,   0 },  // reserved
+        //          appA appB     appC appD
+        {   4,   2,   1,   1,   2,   1,   1 },  // apps
+        {  -1,  -1,   1,   1,  -1,   1,   1 },  // req granularity
+        {   2,   2,   0,   0,   2,   0,   0 },  // subqueues
+    };
+    // QueueE inherits non-preemption from QueueD
+    schedConf.setPreemptionDisabled("root.queueD", true);
+    ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData);
+    policy.editSchedule();
+    // appC is running on QueueE. QueueE is over absMaxCap, but is not
+    // preemptable. Therefore, appC resources should not be preempted.
+    verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appC)));
+  }
+
+  @Test
   public void testOverCapacityImbalance() {
     int[][] qData = new int[][]{
       //  /   A   B   C