Posted to common-commits@hadoop.apache.org by ji...@apache.org on 2015/05/21 01:48:25 UTC

[01/50] [abbrv] hadoop git commit: HDFS-8371. Fix test failure in TestHdfsConfigFields for spanreceiver properties. Contributed by Ray Chiang.

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 15ccd967e -> 8966d4217


HDFS-8371. Fix test failure in TestHdfsConfigFields for spanreceiver properties. Contributed by Ray Chiang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cbc01ed0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cbc01ed0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cbc01ed0

Branch: refs/heads/HDFS-7240
Commit: cbc01ed08ea36f70afca6112ccdbf7331567070b
Parents: 9a2a955
Author: Akira Ajisaka <aa...@apache.org>
Authored: Fri May 15 12:14:03 2015 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Fri May 15 12:14:47 2015 +0900

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                      | 3 +++
 .../test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java  | 4 ++++
 2 files changed, 7 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbc01ed0/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4df18ec..b90a773 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -761,6 +761,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8150. Make getFileChecksum fail for blocks under construction
     (J.Andreina via vinayakumarb)
 
+    HDFS-8371. Fix test failure in TestHdfsConfigFields for spanreceiver
+    properties. (Ray Chiang via aajisaka)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbc01ed0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
index a1f8a3c..ec0450a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
@@ -76,5 +76,9 @@ public class TestHdfsConfigFields extends TestConfigurationFieldsBase {
 
     // Some properties have moved to HdfsClientConfigKeys
     xmlPropsToSkipCompare.add("dfs.client.short.circuit.replica.stale.threshold.ms");
+
+    // Ignore SpanReceiveHost properties
+    xmlPropsToSkipCompare.add("dfs.htrace.spanreceiver.classes");
+    xmlPropsToSkipCompare.add("dfs.client.htrace.spanreceiver.classes");
   }
 }
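
Context for the HDFS-8371 patch above: TestHdfsConfigFields extends TestConfigurationFieldsBase, which cross-checks the config keys declared in the Java key classes against the entries shipped in hdfs-default.xml and flags mismatches; properties that legitimately exist on only one side go into skip sets such as xmlPropsToSkipCompare. The following is an illustrative, self-contained sketch of that skip-list idea only, not the real Hadoop test harness (class and method names other than the two properties from the diff are assumptions):

import java.util.HashSet;
import java.util.Set;

// Illustrative sketch: a stripped-down version of the skip-list pattern used
// by TestConfigurationFieldsBase-style tests. Not the actual Hadoop code.
public class ConfigFieldsCheckSketch {

  // XML properties that intentionally have no matching Java constant.
  private final Set<String> xmlPropsToSkipCompare = new HashSet<>();

  public ConfigFieldsCheckSketch() {
    // Same idea as the HDFS-8371 fix: the span receiver keys appear in
    // hdfs-default.xml but are not declared in DFSConfigKeys, so skip them.
    xmlPropsToSkipCompare.add("dfs.htrace.spanreceiver.classes");
    xmlPropsToSkipCompare.add("dfs.client.htrace.spanreceiver.classes");
  }

  /** Returns XML keys that have no Java constant and are not whitelisted. */
  public Set<String> findUndeclaredXmlProps(Set<String> xmlKeys, Set<String> javaKeys) {
    Set<String> missing = new HashSet<>(xmlKeys);
    missing.removeAll(javaKeys);
    missing.removeAll(xmlPropsToSkipCompare);
    return missing; // a test built on this would fail if the set is non-empty
  }
}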


[23/50] [abbrv] hadoop git commit: HDFS-8405. Fix a typo in NamenodeFsck. Contributed by Takanobu Asanuma

Posted by ji...@apache.org.
HDFS-8405. Fix a typo in NamenodeFsck.  Contributed by Takanobu Asanuma


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0c590e1c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0c590e1c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0c590e1c

Branch: refs/heads/HDFS-7240
Commit: 0c590e1c097462979f7ee054ad9121345d58655b
Parents: a2190bf
Author: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Authored: Tue May 19 02:57:54 2015 +0800
Committer: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Committed: Tue May 19 02:57:54 2015 +0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt       |  2 ++
 .../hadoop/hdfs/server/namenode/FsckServlet.java  |  2 +-
 .../hadoop/hdfs/server/namenode/NamenodeFsck.java | 18 +++++++-----------
 .../hadoop/hdfs/server/namenode/TestFsck.java     |  8 ++++----
 4 files changed, 14 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c590e1c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4270a9c..7fd3495 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -868,6 +868,8 @@ Release 2.7.1 - UNRELEASED
     HDFS-6300. Prevent multiple balancers from running simultaneously
     (Rakesh R via vinayakumarb)
 
+    HDFS-8405. Fix a typo in NamenodeFsck.  (Takanobu Asanuma via szetszwo)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c590e1c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java
index 6fb3d21..5fae9cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java
@@ -66,7 +66,7 @@ public class FsckServlet extends DfsServlet {
               namesystem.getNumberOfDatanodes(DatanodeReportType.LIVE); 
           new NamenodeFsck(conf, nn,
               bm.getDatanodeManager().getNetworkTopology(), pmap, out,
-              totalDatanodes, bm.minReplication, remoteAddress).fsck();
+              totalDatanodes, remoteAddress).fsck();
           
           return null;
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c590e1c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 61f8fdb..44dba28 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -121,7 +121,6 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
   private final NameNode namenode;
   private final NetworkTopology networktopology;
   private final int totalDatanodes;
-  private final short minReplication;
   private final InetAddress remoteAddress;
 
   private String lostFound = null;
@@ -181,19 +180,17 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
    * @param pmap key=value[] map passed to the http servlet as url parameters
    * @param out output stream to write the fsck output
    * @param totalDatanodes number of live datanodes
-   * @param minReplication minimum replication
    * @param remoteAddress source address of the fsck request
    */
   NamenodeFsck(Configuration conf, NameNode namenode,
       NetworkTopology networktopology, 
       Map<String,String[]> pmap, PrintWriter out,
-      int totalDatanodes, short minReplication, InetAddress remoteAddress) {
+      int totalDatanodes, InetAddress remoteAddress) {
     this.conf = conf;
     this.namenode = namenode;
     this.networktopology = networktopology;
     this.out = out;
     this.totalDatanodes = totalDatanodes;
-    this.minReplication = minReplication;
     this.remoteAddress = remoteAddress;
     this.bpPolicy = BlockPlacementPolicy.getInstance(conf, null,
         networktopology,
@@ -308,7 +305,6 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
     final long startTime = Time.monotonicNow();
     try {
       if(blockIds != null) {
-
         String[] blocks = blockIds.split(" ");
         StringBuilder sb = new StringBuilder();
         sb.append("FSCK started by " +
@@ -561,7 +557,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
       res.numExpectedReplicas += targetFileReplication;
 
       // count under min repl'd blocks
-      if(totalReplicasPerBlock < minReplication){
+      if(totalReplicasPerBlock < res.minReplication){
         res.numUnderMinReplicatedBlocks++;
       }
 
@@ -582,7 +578,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
       }
 
       // count minimally replicated blocks
-      if (totalReplicasPerBlock >= minReplication)
+      if (totalReplicasPerBlock >= res.minReplication)
         res.numMinReplicatedBlocks++;
 
       // count missing replicas / under replicated blocks
@@ -601,7 +597,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
                     decommissioningReplicas + " decommissioning replica(s).");
       }
 
-      // count mis replicated blocks block
+      // count mis replicated blocks
       BlockPlacementStatus blockPlacementStatus = bpPolicy
           .verifyBlockPlacement(path, lBlk, targetFileReplication);
       if (!blockPlacementStatus.isPlacementPolicySatisfied()) {
@@ -724,8 +720,8 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
   private void countStorageTypeSummary(HdfsFileStatus file, LocatedBlock lBlk) {
     StorageType[] storageTypes = lBlk.getStorageTypes();
     storageTypeSummary.add(Arrays.copyOf(storageTypes, storageTypes.length),
-                           namenode.getNamesystem().getBlockManager()
-                               .getStoragePolicy(file.getStoragePolicy()));
+        namenode.getNamesystem().getBlockManager()
+        .getStoragePolicy(file.getStoragePolicy()));
   }
 
   private void deleteCorruptedFile(String path) {
@@ -1069,7 +1065,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
                 ((float) (numUnderMinReplicatedBlocks * 100) / (float) totalBlocks))
                 .append(" %)");
           }
-          res.append("\n  ").append("DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY:\t")
+          res.append("\n  ").append(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY + ":\t")
              .append(minReplication);
         }
         if(corruptFiles>0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c590e1c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 1ce09e1..7f31f84 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -785,7 +785,7 @@ public class TestFsck {
       System.out.println(outStr);
       assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
       assertTrue(outStr.contains("UNDER MIN REPL'D BLOCKS:\t1 (100.0 %)"));
-      assertTrue(outStr.contains("DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY:\t2"));
+      assertTrue(outStr.contains("dfs.namenode.replication.min:\t2"));
     } finally {
       if (cluster != null) {cluster.shutdown();}
     }
@@ -1052,7 +1052,7 @@ public class TestFsck {
       PrintWriter out = new PrintWriter(result, true);
       InetAddress remoteAddress = InetAddress.getLocalHost();
       NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, 
-          NUM_REPLICAS, (short)1, remoteAddress);
+          NUM_REPLICAS, remoteAddress);
       
       // Run the fsck and check the Result
       final HdfsFileStatus file = 
@@ -1129,7 +1129,7 @@ public class TestFsck {
       PrintWriter out = new PrintWriter(result, true);
       InetAddress remoteAddress = InetAddress.getLocalHost();
       NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, 
-          NUM_DN, REPL_FACTOR, remoteAddress);
+          NUM_DN, remoteAddress);
       
       // Run the fsck and check the Result
       final HdfsFileStatus file = 
@@ -1176,7 +1176,7 @@ public class TestFsck {
     when(blockManager.getDatanodeManager()).thenReturn(dnManager);
 
     NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
-        NUM_REPLICAS, (short)1, remoteAddress);
+        NUM_REPLICAS, remoteAddress);
 
     String pathString = "/tmp/testFile";
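
Context for the HDFS-8405 patch above: besides dropping the redundant minReplication constructor argument (the value is already carried via res.minReplication in the fsck Result), the patch fixes the report line that previously printed the literal text "DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY" instead of the key's value, dfs.namenode.replication.min. A hedged sketch of that before/after, with the surrounding NamenodeFsck code simplified away (not the actual class):

// Illustrative sketch only, not the real NamenodeFsck code.
public class FsckReportSketch {
  // Stand-in for DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY.
  static final String DFS_NAMENODE_REPLICATION_MIN_KEY = "dfs.namenode.replication.min";

  static String reportMinReplication(short minReplication) {
    // Before the fix, the constant's *name* sat inside the quoted string, so
    // the report literally said "DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY:".
    // After the fix, the constant is concatenated, so its value is printed.
    return "\n  " + DFS_NAMENODE_REPLICATION_MIN_KEY + ":\t" + minReplication;
  }

  public static void main(String[] args) {
    // Prints a line like the one asserted in TestFsck:
    //   dfs.namenode.replication.min:	2
    System.out.println(reportMinReplication((short) 2));
  }
}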
 


[42/50] [abbrv] hadoop git commit: YARN-3565. NodeHeartbeatRequest/RegisterNodeManagerRequest should use NodeLabel object instead of String. (Naganarasimha G R via wangda)

Posted by ji...@apache.org.
YARN-3565. NodeHeartbeatRequest/RegisterNodeManagerRequest should use NodeLabel object instead of String. (Naganarasimha G R via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b37da52a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b37da52a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b37da52a

Branch: refs/heads/HDFS-7240
Commit: b37da52a1c4fb3da2bd21bfadc5ec61c5f953a59
Parents: 12d6c5c
Author: Wangda Tan <wa...@apache.org>
Authored: Tue May 19 16:34:17 2015 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Tue May 19 16:34:17 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 ++
 .../src/main/proto/yarn_protos.proto            |  4 ---
 .../nodelabels/CommonNodeLabelsManager.java     |  2 ++
 .../yarn/nodelabels/NodeLabelTestBase.java      | 12 +++++++
 .../protocolrecords/NodeHeartbeatRequest.java   |  7 ++--
 .../RegisterNodeManagerRequest.java             |  7 ++--
 .../impl/pb/NodeHeartbeatRequestPBImpl.java     | 34 ++++++++++++++-----
 .../pb/RegisterNodeManagerRequestPBImpl.java    | 35 +++++++++++++++-----
 .../yarn_server_common_service_protos.proto     |  8 +++--
 .../hadoop/yarn/TestYarnServerApiClasses.java   | 19 ++++++-----
 .../nodemanager/NodeStatusUpdaterImpl.java      | 23 +++++++------
 .../nodelabels/NodeLabelsProvider.java          |  3 +-
 .../TestNodeStatusUpdaterForLabels.java         | 23 +++++++------
 .../resourcemanager/ResourceTrackerService.java | 18 ++++++++--
 .../TestResourceTrackerService.java             | 25 +++++++-------
 15 files changed, 149 insertions(+), 74 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b37da52a/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 5a6fb38..ab6f488 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -247,6 +247,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3362. Add node label usage in RM CapacityScheduler web UI.
     (Naganarasimha G R via wangda)
 
+    YARN-3565. NodeHeartbeatRequest/RegisterNodeManagerRequest should use 
+    NodeLabel object instead of String. (Naganarasimha G R via wangda)
+
   OPTIMIZATIONS
 
     YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b37da52a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 4095676..3c4aa52 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -253,10 +253,6 @@ message NodeIdToLabelsProto {
   repeated string nodeLabels = 2;
 }
 
-message StringArrayProto {
-  repeated string elements = 1;
-}
-
 message LabelsToNodeIdsProto {
   optional string nodeLabels = 1;
   repeated NodeIdProto nodeId = 2;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b37da52a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
index bf34837..badf4d6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
@@ -39,6 +39,7 @@ import java.util.regex.Pattern;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.yarn.api.records.NodeId;
@@ -59,6 +60,7 @@ import org.apache.hadoop.yarn.util.resource.Resources;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableSet;
 
+@Private
 public class CommonNodeLabelsManager extends AbstractService {
   protected static final Log LOG = LogFactory.getLog(CommonNodeLabelsManager.class);
   private static final int MAX_LABEL_LENGTH = 255;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b37da52a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/NodeLabelTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/NodeLabelTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/NodeLabelTestBase.java
index 8301d96..f834d54 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/NodeLabelTestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/NodeLabelTestBase.java
@@ -112,6 +112,18 @@ public class NodeLabelTestBase {
     return set;
   }
   
+  @SuppressWarnings("unchecked")
+  public static Set<NodeLabel> toNodeLabelSet(String... nodeLabelsStr) {
+    if (null == nodeLabelsStr) {
+      return null;
+    }
+    Set<NodeLabel> labels = new HashSet<NodeLabel>();
+    for (String label : nodeLabelsStr) {
+      labels.add(NodeLabel.newInstance(label));
+    }
+    return labels;
+  }
+
   public NodeId toNodeId(String str) {
     if (str.contains(":")) {
       int idx = str.indexOf(':');

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b37da52a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java
index 767e4b0..84ca8a4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.api.protocolrecords;
 import java.util.List;
 import java.util.Set;
 
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.server.api.records.MasterKey;
 import org.apache.hadoop.yarn.server.api.records.NodeStatus;
 import org.apache.hadoop.yarn.util.Records;
@@ -29,7 +30,7 @@ public abstract class NodeHeartbeatRequest {
   
   public static NodeHeartbeatRequest newInstance(NodeStatus nodeStatus,
       MasterKey lastKnownContainerTokenMasterKey,
-      MasterKey lastKnownNMTokenMasterKey, Set<String> nodeLabels) {
+      MasterKey lastKnownNMTokenMasterKey, Set<NodeLabel> nodeLabels) {
     NodeHeartbeatRequest nodeHeartbeatRequest =
         Records.newRecord(NodeHeartbeatRequest.class);
     nodeHeartbeatRequest.setNodeStatus(nodeStatus);
@@ -50,8 +51,8 @@ public abstract class NodeHeartbeatRequest {
   public abstract MasterKey getLastKnownNMTokenMasterKey();
   public abstract void setLastKnownNMTokenMasterKey(MasterKey secretKey);
   
-  public abstract Set<String> getNodeLabels();
-  public abstract void setNodeLabels(Set<String> nodeLabels);
+  public abstract Set<NodeLabel> getNodeLabels();
+  public abstract void setNodeLabels(Set<NodeLabel> nodeLabels);
 
   public abstract List<LogAggregationReport>
       getLogAggregationReportsForApps();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b37da52a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java
index bf09b33..7798ba9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java
@@ -23,6 +23,7 @@ import java.util.Set;
 
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.util.Records;
 
@@ -39,7 +40,7 @@ public abstract class RegisterNodeManagerRequest {
   public static RegisterNodeManagerRequest newInstance(NodeId nodeId,
       int httpPort, Resource resource, String nodeManagerVersionId,
       List<NMContainerStatus> containerStatuses,
-      List<ApplicationId> runningApplications, Set<String> nodeLabels) {
+      List<ApplicationId> runningApplications, Set<NodeLabel> nodeLabels) {
     RegisterNodeManagerRequest request =
         Records.newRecord(RegisterNodeManagerRequest.class);
     request.setHttpPort(httpPort);
@@ -57,8 +58,8 @@ public abstract class RegisterNodeManagerRequest {
   public abstract Resource getResource();
   public abstract String getNMVersion();
   public abstract List<NMContainerStatus> getNMContainerStatuses();
-  public abstract Set<String> getNodeLabels();
-  public abstract void setNodeLabels(Set<String> nodeLabels);
+  public abstract Set<NodeLabel> getNodeLabels();
+  public abstract void setNodeLabels(Set<NodeLabel> nodeLabels);
   
   /**
    * We introduce this here because currently YARN RM doesn't persist nodes info

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b37da52a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatRequestPBImpl.java
index 81f173d..0a9895e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatRequestPBImpl.java
@@ -24,12 +24,16 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Set;
 
-import org.apache.hadoop.yarn.proto.YarnProtos.StringArrayProto;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
+import org.apache.hadoop.yarn.api.records.impl.pb.NodeLabelPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeLabelProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.MasterKeyProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeStatusProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.LogAggregationReportProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatRequestProtoOrBuilder;
+import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeLabelsProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeLabelsProto.Builder;
 import org.apache.hadoop.yarn.server.api.protocolrecords.LogAggregationReport;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
 import org.apache.hadoop.yarn.server.api.records.MasterKey;
@@ -45,7 +49,7 @@ public class NodeHeartbeatRequestPBImpl extends NodeHeartbeatRequest {
   private NodeStatus nodeStatus = null;
   private MasterKey lastKnownContainerTokenMasterKey = null;
   private MasterKey lastKnownNMTokenMasterKey = null;
-  private Set<String> labels = null;
+  private Set<NodeLabel> labels = null;
   private List<LogAggregationReport> logAggregationReportsForApps = null;
 
   public NodeHeartbeatRequestPBImpl() {
@@ -93,8 +97,11 @@ public class NodeHeartbeatRequestPBImpl extends NodeHeartbeatRequest {
     }
     if (this.labels != null) {
       builder.clearNodeLabels();
-      builder.setNodeLabels(StringArrayProto.newBuilder()
-          .addAllElements(this.labels).build());
+      Builder newBuilder = NodeLabelsProto.newBuilder();
+      for (NodeLabel label : labels) {
+        newBuilder.addNodeLabels(convertToProtoFormat(label));
+      }
+      builder.setNodeLabels(newBuilder.build());
     }
     if (this.logAggregationReportsForApps != null) {
       addLogAggregationStatusForAppsToProto();
@@ -238,13 +245,13 @@ public class NodeHeartbeatRequestPBImpl extends NodeHeartbeatRequest {
   }
 
   @Override
-  public Set<String> getNodeLabels() {
+  public Set<NodeLabel> getNodeLabels() {
     initNodeLabels();
     return this.labels;
   }
 
   @Override
-  public void setNodeLabels(Set<String> nodeLabels) {
+  public void setNodeLabels(Set<NodeLabel> nodeLabels) {
     maybeInitBuilder();
     builder.clearNodeLabels();
     this.labels = nodeLabels;
@@ -259,8 +266,19 @@ public class NodeHeartbeatRequestPBImpl extends NodeHeartbeatRequest {
       labels = null;
       return;
     }
-    StringArrayProto nodeLabels = p.getNodeLabels();
-    labels = new HashSet<String>(nodeLabels.getElementsList());
+    NodeLabelsProto nodeLabels = p.getNodeLabels();
+    labels = new HashSet<NodeLabel>();
+    for(NodeLabelProto nlp : nodeLabels.getNodeLabelsList()) {
+      labels.add(convertFromProtoFormat(nlp));
+    }
+  }
+
+  private NodeLabelPBImpl convertFromProtoFormat(NodeLabelProto p) {
+    return new NodeLabelPBImpl(p);
+  }
+
+  private NodeLabelProto convertToProtoFormat(NodeLabel t) {
+    return ((NodeLabelPBImpl)t).getProto();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b37da52a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java
index 1d2bb82..5b0e0a1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java
@@ -27,16 +27,19 @@ import java.util.Set;
 
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.NodeLabelPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
-import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdToLabelsProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeLabelProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
-import org.apache.hadoop.yarn.proto.YarnProtos.StringArrayProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NMContainerStatusProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeLabelsProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeLabelsProto.Builder;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerRequestProtoOrBuilder;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
@@ -51,7 +54,7 @@ public class RegisterNodeManagerRequestPBImpl extends RegisterNodeManagerRequest
   private NodeId nodeId = null;
   private List<NMContainerStatus> containerStatuses = null;
   private List<ApplicationId> runningApplications = null;
-  private Set<String> labels = null;
+  private Set<NodeLabel> labels = null;
 
   public RegisterNodeManagerRequestPBImpl() {
     builder = RegisterNodeManagerRequestProto.newBuilder();
@@ -84,8 +87,11 @@ public class RegisterNodeManagerRequestPBImpl extends RegisterNodeManagerRequest
     }
     if (this.labels != null) {
       builder.clearNodeLabels();
-      builder.setNodeLabels(StringArrayProto.newBuilder()
-          .addAllElements(this.labels).build());
+      Builder newBuilder = NodeLabelsProto.newBuilder();
+      for (NodeLabel label : labels) {
+        newBuilder.addNodeLabels(convertToProtoFormat(label));
+      }
+      builder.setNodeLabels(newBuilder.build());
     }
   }
 
@@ -293,13 +299,13 @@ public class RegisterNodeManagerRequestPBImpl extends RegisterNodeManagerRequest
   }
   
   @Override
-  public Set<String> getNodeLabels() {
+  public Set<NodeLabel> getNodeLabels() {
     initNodeLabels();
     return this.labels;
   }
 
   @Override
-  public void setNodeLabels(Set<String> nodeLabels) {
+  public void setNodeLabels(Set<NodeLabel> nodeLabels) {
     maybeInitBuilder();
     builder.clearNodeLabels();
     this.labels = nodeLabels;
@@ -314,8 +320,19 @@ public class RegisterNodeManagerRequestPBImpl extends RegisterNodeManagerRequest
       labels=null;
       return;
     }
-    StringArrayProto nodeLabels = p.getNodeLabels();
-    labels = new HashSet<String>(nodeLabels.getElementsList());
+    NodeLabelsProto nodeLabels = p.getNodeLabels();
+    labels = new HashSet<NodeLabel>();
+    for(NodeLabelProto nlp : nodeLabels.getNodeLabelsList()) {
+      labels.add(convertFromProtoFormat(nlp));
+    }
+  }
+
+  private NodeLabelPBImpl convertFromProtoFormat(NodeLabelProto p) {
+    return new NodeLabelPBImpl(p);
+  }
+
+  private NodeLabelProto convertToProtoFormat(NodeLabel t) {
+    return ((NodeLabelPBImpl)t).getProto();
   }
 
   private ApplicationIdPBImpl convertFromProtoFormat(ApplicationIdProto p) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b37da52a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
index c027ac0..f3735a0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
@@ -25,6 +25,10 @@ package hadoop.yarn;
 import "yarn_protos.proto";
 import "yarn_server_common_protos.proto";
 
+message NodeLabelsProto {
+  repeated NodeLabelProto nodeLabels = 1;
+}
+
 message RegisterNodeManagerRequestProto {
   optional NodeIdProto node_id = 1;
   optional int32 http_port = 3;
@@ -32,7 +36,7 @@ message RegisterNodeManagerRequestProto {
   optional string nm_version = 5;
   repeated NMContainerStatusProto container_statuses = 6;
   repeated ApplicationIdProto runningApplications = 7;
-  optional StringArrayProto nodeLabels = 8;
+  optional NodeLabelsProto nodeLabels = 8;
 }
 
 message RegisterNodeManagerResponseProto {
@@ -49,7 +53,7 @@ message NodeHeartbeatRequestProto {
   optional NodeStatusProto node_status = 1;
   optional MasterKeyProto last_known_container_token_master_key = 2;
   optional MasterKeyProto last_known_nm_token_master_key = 3;
-  optional StringArrayProto nodeLabels = 4;
+  optional NodeLabelsProto nodeLabels = 4;
   repeated LogAggregationReportProto log_aggregation_reports_for_apps = 5;
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b37da52a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java
index d42b2c7..f882657 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
@@ -113,7 +114,7 @@ public class TestYarnServerApiClasses {
     Assert.assertTrue(original.getNodeLabels()
         .containsAll(copy.getNodeLabels()));
     // check for empty labels
-    original.setNodeLabels(new HashSet<String> ());
+    original.setNodeLabels(new HashSet<NodeLabel> ());
     copy = new NodeHeartbeatRequestPBImpl(
         original.getProto());
     Assert.assertNotNull(copy.getNodeLabels());
@@ -271,7 +272,7 @@ public class TestYarnServerApiClasses {
 
   @Test
   public void testRegisterNodeManagerRequestWithValidLabels() {
-    HashSet<String> nodeLabels = getValidNodeLabels();
+    HashSet<NodeLabel> nodeLabels = getValidNodeLabels();
     RegisterNodeManagerRequest request =
         RegisterNodeManagerRequest.newInstance(
             NodeId.newInstance("host", 1234), 1234, Resource.newInstance(0, 0),
@@ -286,19 +287,19 @@ public class TestYarnServerApiClasses {
     Assert.assertEquals(true, nodeLabels.containsAll(copy.getNodeLabels()));
 
     // check for empty labels
-    request.setNodeLabels(new HashSet<String> ());
+    request.setNodeLabels(new HashSet<NodeLabel> ());
     copy = new RegisterNodeManagerRequestPBImpl(
         ((RegisterNodeManagerRequestPBImpl) request).getProto());
     Assert.assertNotNull(copy.getNodeLabels());
     Assert.assertEquals(0, copy.getNodeLabels().size());
   }
 
-  private HashSet<String> getValidNodeLabels() {
-    HashSet<String> nodeLabels = new HashSet<String>();
-    nodeLabels.add("java");
-    nodeLabels.add("windows");
-    nodeLabels.add("gpu");
-    nodeLabels.add("x86");
+  private HashSet<NodeLabel> getValidNodeLabels() {
+    HashSet<NodeLabel> nodeLabels = new HashSet<NodeLabel>();
+    nodeLabels.add(NodeLabel.newInstance("java"));
+    nodeLabels.add(NodeLabel.newInstance("windows"));
+    nodeLabels.add(NodeLabel.newInstance("gpu"));
+    nodeLabels.add(NodeLabel.newInstance("x86"));
     return nodeLabels;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b37da52a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
index 8046228..b635c46 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
@@ -30,9 +30,9 @@ import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
-import java.util.concurrent.ConcurrentLinkedQueue;
 import java.util.Random;
 import java.util.Set;
+import java.util.concurrent.ConcurrentLinkedQueue;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -50,6 +50,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
@@ -279,11 +280,11 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
   protected void registerWithRM()
       throws YarnException, IOException {
     List<NMContainerStatus> containerReports = getNMContainerStatuses();
-    Set<String> nodeLabels = null;
+    Set<NodeLabel> nodeLabels = null;
     if (hasNodeLabelsProvider) {
       nodeLabels = nodeLabelsProvider.getNodeLabels();
       nodeLabels =
-          (null == nodeLabels) ? CommonNodeLabelsManager.EMPTY_STRING_SET
+          (null == nodeLabels) ? CommonNodeLabelsManager.EMPTY_NODELABEL_SET
               : nodeLabels;
     }
     RegisterNodeManagerRequest request =
@@ -628,29 +629,29 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
       @SuppressWarnings("unchecked")
       public void run() {
         int lastHeartbeatID = 0;
-        Set<String> lastUpdatedNodeLabelsToRM = null;
+        Set<NodeLabel> lastUpdatedNodeLabelsToRM = null;
         if (hasNodeLabelsProvider) {
           lastUpdatedNodeLabelsToRM = nodeLabelsProvider.getNodeLabels();
           lastUpdatedNodeLabelsToRM =
-              (null == lastUpdatedNodeLabelsToRM) ? CommonNodeLabelsManager.EMPTY_STRING_SET
+              (null == lastUpdatedNodeLabelsToRM) ? CommonNodeLabelsManager.EMPTY_NODELABEL_SET
                   : lastUpdatedNodeLabelsToRM;
         }
         while (!isStopped) {
           // Send heartbeat
           try {
             NodeHeartbeatResponse response = null;
-            Set<String> nodeLabelsForHeartbeat = null;
+            Set<NodeLabel> nodeLabelsForHeartbeat = null;
             NodeStatus nodeStatus = getNodeStatus(lastHeartbeatID);
 
             if (hasNodeLabelsProvider) {
               nodeLabelsForHeartbeat = nodeLabelsProvider.getNodeLabels();
-              //if the provider returns null then consider empty labels are set
+              // if the provider returns null then consider empty labels are set
               nodeLabelsForHeartbeat =
-                  (nodeLabelsForHeartbeat == null) ? CommonNodeLabelsManager.EMPTY_STRING_SET
+                  (nodeLabelsForHeartbeat == null) ? CommonNodeLabelsManager.EMPTY_NODELABEL_SET
                       : nodeLabelsForHeartbeat;
               if (!areNodeLabelsUpdated(nodeLabelsForHeartbeat,
                   lastUpdatedNodeLabelsToRM)) {
-                //if nodelabels have not changed then no need to send
+                // if nodelabels have not changed then no need to send
                 nodeLabelsForHeartbeat = null;
               }
             }
@@ -781,8 +782,8 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
        * @param nodeLabelsOld
        * @return if the New node labels are diff from the older one.
        */
-      private boolean areNodeLabelsUpdated(Set<String> nodeLabelsNew,
-          Set<String> nodeLabelsOld) {
+      private boolean areNodeLabelsUpdated(Set<NodeLabel> nodeLabelsNew,
+          Set<NodeLabel> nodeLabelsOld) {
         if (nodeLabelsNew.size() != nodeLabelsOld.size()
             || !nodeLabelsOld.containsAll(nodeLabelsNew)) {
           return true;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b37da52a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/NodeLabelsProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/NodeLabelsProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/NodeLabelsProvider.java
index 4b34d76..dab3709 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/NodeLabelsProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/NodeLabelsProvider.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.nodemanager.nodelabels;
 import java.util.Set;
 
 import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 
 /**
  * Interface which will be responsible for fetching the labels
@@ -39,5 +40,5 @@ public abstract class NodeLabelsProvider extends AbstractService {
    * 
    * @return Set of node label strings applicable for a node
    */
-  public abstract Set<String> getNodeLabels();
+  public abstract Set<NodeLabel> getNodeLabels();
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b37da52a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdaterForLabels.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdaterForLabels.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdaterForLabels.java
index 437e4c8..a0ed39b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdaterForLabels.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdaterForLabels.java
@@ -23,16 +23,17 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
-import java.util.Collections;
 import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.service.ServiceOperations;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
 import org.apache.hadoop.yarn.nodelabels.NodeLabelTestBase;
 import org.apache.hadoop.yarn.server.api.ResourceTracker;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
@@ -71,7 +72,7 @@ public class TestNodeStatusUpdaterForLabels extends NodeLabelTestBase {
 
   private class ResourceTrackerForLabels implements ResourceTracker {
     int heartbeatID = 0;
-    Set<String> labels;
+    Set<NodeLabel> labels;
 
     private boolean receivedNMHeartbeat = false;
     private boolean receivedNMRegister = false;
@@ -185,18 +186,18 @@ public class TestNodeStatusUpdaterForLabels extends NodeLabelTestBase {
   public static class DummyNodeLabelsProvider extends NodeLabelsProvider {
 
     @SuppressWarnings("unchecked")
-    private Set<String> nodeLabels = Collections.EMPTY_SET;
+    private Set<NodeLabel> nodeLabels = CommonNodeLabelsManager.EMPTY_NODELABEL_SET;
 
     public DummyNodeLabelsProvider() {
       super(DummyNodeLabelsProvider.class.getName());
     }
 
     @Override
-    public synchronized Set<String> getNodeLabels() {
+    public synchronized Set<NodeLabel> getNodeLabels() {
       return nodeLabels;
     }
 
-    synchronized void setNodeLabels(Set<String> nodeLabels) {
+    synchronized void setNodeLabels(Set<NodeLabel> nodeLabels) {
       this.nodeLabels = nodeLabels;
     }
   }
@@ -245,19 +246,21 @@ public class TestNodeStatusUpdaterForLabels extends NodeLabelTestBase {
     resourceTracker.resetNMHeartbeatReceiveFlag();
     nm.start();
     resourceTracker.waitTillRegister();
-    assertCollectionEquals(resourceTracker.labels,
-        dummyLabelsProviderRef.getNodeLabels());
+    assertNLCollectionEquals(resourceTracker.labels,
+        dummyLabelsProviderRef
+            .getNodeLabels());
 
     resourceTracker.waitTillHeartbeat();// wait till the first heartbeat
     resourceTracker.resetNMHeartbeatReceiveFlag();
 
     // heartbeat with updated labels
-    dummyLabelsProviderRef.setNodeLabels(toSet("P"));
+    dummyLabelsProviderRef.setNodeLabels(toNodeLabelSet("P"));
 
     nm.getNodeStatusUpdater().sendOutofBandHeartBeat();
     resourceTracker.waitTillHeartbeat();
-    assertCollectionEquals(resourceTracker.labels,
-        dummyLabelsProviderRef.getNodeLabels());
+    assertNLCollectionEquals(resourceTracker.labels,
+        dummyLabelsProviderRef
+            .getNodeLabels());
     resourceTracker.resetNMHeartbeatReceiveFlag();
 
     // heartbeat without updating labels

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b37da52a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
index 16b6a89..4dc5c88 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
@@ -22,6 +22,7 @@ import java.io.InputStream;
 import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ConcurrentMap;
@@ -42,6 +43,7 @@ import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -247,6 +249,17 @@ public class ResourceTrackerService extends AbstractService implements
     }
   }
 
+  static Set<String> convertToStringSet(Set<NodeLabel> nodeLabels) {
+    if (null == nodeLabels) {
+      return null;
+    }
+    Set<String> labels = new HashSet<String>();
+    for (NodeLabel label : nodeLabels) {
+      labels.add(label.getName());
+    }
+    return labels;
+  }
+
   @SuppressWarnings("unchecked")
   @Override
   public RegisterNodeManagerResponse registerNodeManager(
@@ -346,7 +359,7 @@ public class ResourceTrackerService extends AbstractService implements
     }
 
     // Update node's labels to RM's NodeLabelManager.
-    Set<String> nodeLabels = request.getNodeLabels();
+    Set<String> nodeLabels = convertToStringSet(request.getNodeLabels());
     if (isDistributedNodeLabelsConf && nodeLabels != null) {
       try {
         updateNodeLabelsFromNMReport(nodeLabels, nodeId);
@@ -467,7 +480,8 @@ public class ResourceTrackerService extends AbstractService implements
     // 5. Update node's labels to RM's NodeLabelManager.
     if (isDistributedNodeLabelsConf && request.getNodeLabels() != null) {
       try {
-        updateNodeLabelsFromNMReport(request.getNodeLabels(), nodeId);
+        updateNodeLabelsFromNMReport(
+            convertToStringSet(request.getNodeLabels()), nodeId);
         nodeHeartBeatResponse.setAreNodeLabelsAcceptedByRM(true);
       } catch (IOException ex) {
         //ensure the error message is captured and sent across in response

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b37da52a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
index cc5f464..3474ed6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -353,14 +354,14 @@ public class TestResourceTrackerService extends NodeLabelTestBase {
     registerReq.setNodeId(nodeId);
     registerReq.setHttpPort(1234);
     registerReq.setNMVersion(YarnVersionInfo.getVersion());
-    registerReq.setNodeLabels(toSet("A"));
+    registerReq.setNodeLabels(toSet(NodeLabel.newInstance("A")));
     RegisterNodeManagerResponse response =
         resourceTrackerService.registerNodeManager(registerReq);
 
     Assert.assertEquals("Action should be normal on valid Node Labels",
         NodeAction.NORMAL, response.getNodeAction());
     assertCollectionEquals(nodeLabelsMgr.getNodeLabels().get(nodeId),
-        registerReq.getNodeLabels());
+        ResourceTrackerService.convertToStringSet(registerReq.getNodeLabels()));
     Assert.assertTrue("Valid Node Labels were not accepted by RM",
         response.getAreNodeLabelsAcceptedByRM());
     rm.stop();
@@ -402,7 +403,7 @@ public class TestResourceTrackerService extends NodeLabelTestBase {
     registerReq.setNodeId(nodeId);
     registerReq.setHttpPort(1234);
     registerReq.setNMVersion(YarnVersionInfo.getVersion());
-    registerReq.setNodeLabels(toSet("A", "B", "C"));
+    registerReq.setNodeLabels(toNodeLabelSet("A", "B", "C"));
     RegisterNodeManagerResponse response =
         resourceTrackerService.registerNodeManager(registerReq);
 
@@ -455,7 +456,7 @@ public class TestResourceTrackerService extends NodeLabelTestBase {
     req.setNodeId(nodeId);
     req.setHttpPort(1234);
     req.setNMVersion(YarnVersionInfo.getVersion());
-    req.setNodeLabels(toSet("#Y"));
+    req.setNodeLabels(toNodeLabelSet("#Y"));
     RegisterNodeManagerResponse response =
         resourceTrackerService.registerNodeManager(req);
 
@@ -506,7 +507,7 @@ public class TestResourceTrackerService extends NodeLabelTestBase {
     req.setNodeId(nodeId);
     req.setHttpPort(1234);
     req.setNMVersion(YarnVersionInfo.getVersion());
-    req.setNodeLabels(toSet("A"));
+    req.setNodeLabels(toNodeLabelSet("A"));
     RegisterNodeManagerResponse response =
         resourceTrackerService.registerNodeManager(req);
     // registered to RM with central label config
@@ -568,14 +569,14 @@ public class TestResourceTrackerService extends NodeLabelTestBase {
     registerReq.setNodeId(nodeId);
     registerReq.setHttpPort(1234);
     registerReq.setNMVersion(YarnVersionInfo.getVersion());
-    registerReq.setNodeLabels(toSet("A")); // Node register label
+    registerReq.setNodeLabels(toNodeLabelSet("A")); // Node register label
     RegisterNodeManagerResponse registerResponse =
         resourceTrackerService.registerNodeManager(registerReq);
 
     // modification of labels during heartbeat
     NodeHeartbeatRequest heartbeatReq =
         Records.newRecord(NodeHeartbeatRequest.class);
-    heartbeatReq.setNodeLabels(toSet("B")); // Node heartbeat label update
+    heartbeatReq.setNodeLabels(toNodeLabelSet("B")); // Node heartbeat label update
     NodeStatus nodeStatusObject = getNodeStatusObject(nodeId);
     heartbeatReq.setNodeStatus(nodeStatusObject);
     heartbeatReq.setLastKnownNMTokenMasterKey(registerResponse
@@ -588,7 +589,7 @@ public class TestResourceTrackerService extends NodeLabelTestBase {
     Assert.assertEquals("InValid Node Labels were not accepted by RM",
         NodeAction.NORMAL, nodeHeartbeatResponse.getNodeAction());
     assertCollectionEquals(nodeLabelsMgr.getNodeLabels().get(nodeId),
-        heartbeatReq.getNodeLabels());
+        ResourceTrackerService.convertToStringSet(heartbeatReq.getNodeLabels()));
     Assert.assertTrue("Valid Node Labels were not accepted by RM",
         nodeHeartbeatResponse.getAreNodeLabelsAcceptedByRM());
     
@@ -652,13 +653,13 @@ public class TestResourceTrackerService extends NodeLabelTestBase {
     registerReq.setNodeId(nodeId);
     registerReq.setHttpPort(1234);
     registerReq.setNMVersion(YarnVersionInfo.getVersion());
-    registerReq.setNodeLabels(toSet("A"));
+    registerReq.setNodeLabels(toNodeLabelSet("A"));
     RegisterNodeManagerResponse registerResponse =
         resourceTrackerService.registerNodeManager(registerReq);
 
     NodeHeartbeatRequest heartbeatReq =
         Records.newRecord(NodeHeartbeatRequest.class);
-    heartbeatReq.setNodeLabels(toSet("B", "#C")); // Invalid heart beat labels
+    heartbeatReq.setNodeLabels(toNodeLabelSet("B", "#C")); // Invalid heart beat labels
     heartbeatReq.setNodeStatus(getNodeStatusObject(nodeId));
     heartbeatReq.setLastKnownNMTokenMasterKey(registerResponse
         .getNMTokenMasterKey());
@@ -705,13 +706,13 @@ public class TestResourceTrackerService extends NodeLabelTestBase {
     req.setNodeId(nodeId);
     req.setHttpPort(1234);
     req.setNMVersion(YarnVersionInfo.getVersion());
-    req.setNodeLabels(toSet("A", "B", "C"));
+    req.setNodeLabels(toNodeLabelSet("A", "B", "C"));
     RegisterNodeManagerResponse registerResponse =
         resourceTrackerService.registerNodeManager(req);
 
     NodeHeartbeatRequest heartbeatReq =
         Records.newRecord(NodeHeartbeatRequest.class);
-    heartbeatReq.setNodeLabels(toSet("B")); // Valid heart beat labels
+    heartbeatReq.setNodeLabels(toNodeLabelSet("B")); // Valid heart beat labels
     heartbeatReq.setNodeStatus(getNodeStatusObject(nodeId));
     heartbeatReq.setLastKnownNMTokenMasterKey(registerResponse
         .getNMTokenMasterKey());
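
The hunks above move the test from plain string label sets to NodeLabel sets (via NodeLabel.newInstance("A")) and compare the manager's state against ResourceTrackerService.convertToStringSet(registerReq.getNodeLabels()). The implementation of that helper is not shown in this diff; a minimal sketch, assuming it simply maps each NodeLabel record back to its name, could look like the hypothetical class below (not part of the commit).

import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.yarn.api.records.NodeLabel;

// Hypothetical stand-in for ResourceTrackerService.convertToStringSet();
// the real method is not shown in this diff.
public final class NodeLabelSets {

  private NodeLabelSets() {
  }

  // Maps each NodeLabel record back to its plain label name, which is what
  // the node-labels manager stores and what the assertions above compare.
  public static Set<String> convertToStringSet(Set<NodeLabel> nodeLabels) {
    if (nodeLabels == null) {
      return null;
    }
    Set<String> labelNames = new HashSet<String>(nodeLabels.size());
    for (NodeLabel label : nodeLabels) {
      labelNames.add(label.getName());
    }
    return labelNames;
  }
}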


[34/50] [abbrv] hadoop git commit: YARN-3302. TestDockerContainerExecutor should run automatically if it can detect docker in the usual place (Ravindra Kumar Naik via raviprak)

Posted by ji...@apache.org.
YARN-3302. TestDockerContainerExecutor should run automatically if it can detect docker in the usual place (Ravindra Kumar Naik via raviprak)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c97f32e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c97f32e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c97f32e7

Branch: refs/heads/HDFS-7240
Commit: c97f32e7b9d9e1d4c80682cc01741579166174d1
Parents: 5009ad4
Author: Ravi Prakash <ra...@altiscale.com>
Authored: Tue May 19 10:28:11 2015 -0700
Committer: Ravi Prakash <ra...@altiscale.com>
Committed: Tue May 19 10:28:11 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 +++
 .../TestDockerContainerExecutor.java            | 27 +++++++++++++++-----
 2 files changed, 24 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c97f32e7/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e17e9c7..34cd051 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -417,6 +417,9 @@ Release 2.8.0 - UNRELEASED
     YARN-2421. RM still allocates containers to an app in the FINISHING
     state (Chang Li via jlowe)
 
+    YARN-3302. TestDockerContainerExecutor should run automatically if it can
+    detect docker in the usual place (Ravindra Kumar Naik via raviprak)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c97f32e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDockerContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDockerContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDockerContainerExecutor.java
index 65e381c..9386897 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDockerContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDockerContainerExecutor.java
@@ -51,10 +51,11 @@ import com.google.common.base.Strings;
  * This is intended to test the DockerContainerExecutor code, but it requires
  * docker to be installed.
  * <br><ol>
- * <li>Install docker, and Compile the code with docker-service-url set to the
- * host and port where docker service is running.
+ * <li>To run the tests, set docker-service-url to the host and port where the
+ * docker service is running (if docker-service-url is not specified, the
+ * local daemon will be used).
  * <br><pre><code>
- * > mvn clean install -Ddocker-service-url=tcp://0.0.0.0:4243 -DskipTests
+ * mvn test -Ddocker-service-url=tcp://0.0.0.0:4243 -Dtest=TestDockerContainerExecutor
  * </code></pre>
  */
 public class TestDockerContainerExecutor {
@@ -98,10 +99,13 @@ public class TestDockerContainerExecutor {
 
     dockerUrl = System.getProperty("docker-service-url");
     LOG.info("dockerUrl: " + dockerUrl);
-    if (Strings.isNullOrEmpty(dockerUrl)) {
+    if (!Strings.isNullOrEmpty(dockerUrl)) {
+      dockerUrl = " -H " + dockerUrl;
+    } else if (isDockerDaemonRunningLocally()) {
+      dockerUrl = "";
+    } else {
       return;
     }
-    dockerUrl = " -H " + dockerUrl;
     dockerExec = "docker " + dockerUrl;
     conf.set(
       YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME, yarnImage);
@@ -136,6 +140,17 @@ public class TestDockerContainerExecutor {
     return exec != null;
   }
 
+  private boolean isDockerDaemonRunningLocally() {
+    boolean dockerDaemonRunningLocally = true;
+    try {
+      shellExec("docker info");
+    } catch (Exception e) {
+      LOG.info("docker daemon is not running on local machine.");
+      dockerDaemonRunningLocally = false;
+    }
+    return dockerDaemonRunningLocally;
+  }
+
   /**
    * Test that a docker container can be launched to run a command
    * @param cId a fake ContainerID
@@ -200,7 +215,7 @@ public class TestDockerContainerExecutor {
    * Test that a touch command can be launched successfully in a docker
    * container
    */
-  @Test
+  @Test(timeout=1000000)
   public void testLaunchContainer() throws IOException {
     if (!shouldRun()) {
       LOG.warn("Docker not installed, aborting test.");


[03/50] [abbrv] hadoop git commit: HDFS-8350. Remove old webhdfs.xml and other outdated documentation stuff. Contributed by Brahma Reddy Battula.

Posted by ji...@apache.org.
HDFS-8350. Remove old webhdfs.xml and other outdated documentation stuff. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee7beda6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee7beda6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee7beda6

Branch: refs/heads/HDFS-7240
Commit: ee7beda6e3c640685c02185a76bed56eb85731fa
Parents: cbc01ed
Author: Akira Ajisaka <aa...@apache.org>
Authored: Fri May 15 13:54:35 2015 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Fri May 15 13:54:35 2015 +0900

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |    3 +
 .../src/main/docs/changes/ChangesFancyStyle.css |  170 --
 .../main/docs/changes/ChangesSimpleStyle.css    |   49 -
 .../src/main/docs/changes/changes2html.pl       |  286 ----
 .../hadoop-hdfs/src/main/docs/releasenotes.html |    1 -
 .../src/main/docs/src/documentation/README.txt  |    7 -
 .../classes/CatalogManager.properties           |   40 -
 .../main/docs/src/documentation/conf/cli.xconf  |  327 ----
 .../src/documentation/content/xdocs/index.xml   |   46 -
 .../src/documentation/content/xdocs/site.xml    |  289 ----
 .../src/documentation/content/xdocs/tabs.xml    |   37 -
 .../src/documentation/content/xdocs/webhdfs.xml | 1577 ------------------
 .../resources/images/FI-framework.gif           |  Bin 30985 -> 0 bytes
 .../resources/images/FI-framework.odg           |  Bin 80461 -> 0 bytes
 .../resources/images/architecture.gif           |  Bin 15461 -> 0 bytes
 .../resources/images/core-logo.gif              |  Bin 6665 -> 0 bytes
 .../documentation/resources/images/favicon.ico  |  Bin 766 -> 0 bytes
 .../resources/images/hadoop-logo-big.jpg        |  Bin 127869 -> 0 bytes
 .../resources/images/hadoop-logo.jpg            |  Bin 9443 -> 0 bytes
 .../resources/images/request-identify.jpg       |  Bin 39731 -> 0 bytes
 .../main/docs/src/documentation/skinconf.xml    |  366 ----
 .../hadoop-hdfs/src/main/docs/status.xml        |   75 -
 22 files changed, 3 insertions(+), 3270 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee7beda6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b90a773..445b7c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -549,6 +549,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-6184. Capture NN's thread dump when it fails over.
     (Ming Ma via aajisaka)
 
+    HDFS-8350. Remove old webhdfs.xml and other outdated documentation stuff.
+    (Brahma Reddy Battula via aajisaka)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee7beda6/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/changes/ChangesFancyStyle.css
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/changes/ChangesFancyStyle.css b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/changes/ChangesFancyStyle.css
deleted file mode 100644
index 5eef241..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/changes/ChangesFancyStyle.css
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
-* Licensed to the Apache Software Foundation (ASF) under one or more
-* contributor license agreements.  See the NOTICE file distributed with
-* this work for additional information regarding copyright ownership.
-* The ASF licenses this file to You under the Apache License, Version 2.0
-* (the "License"); you may not use this file except in compliance with
-* the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-/**
- * General
- */
-
-img { border: 0; }
-
-#content table {
-  border: 0;
-  width: 100%;
-}
-/*Hack to get IE to render the table at 100%*/
-* html #content table { margin-left: -3px; }
-
-#content th,
-#content td {
-  margin: 0;
-  padding: 0;
-  vertical-align: top;
-}
-
-.clearboth {
-  clear: both;
-}
-
-.note, .warning, .fixme {
-  border: solid black 1px;
-  margin: 1em 3em;
-}
-
-.note .label {
-  background: #369;
-  color: white;
-  font-weight: bold;
-  padding: 5px 10px;
-}
-.note .content {
-  background: #F0F0FF;
-  color: black;
-  line-height: 120%;
-  font-size: 90%;
-  padding: 5px 10px;
-}
-.warning .label {
-  background: #C00;
-  color: white;
-  font-weight: bold;
-  padding: 5px 10px;
-}
-.warning .content {
-  background: #FFF0F0;
-  color: black;
-  line-height: 120%;
-  font-size: 90%;
-  padding: 5px 10px;
-}
-.fixme .label {
-  background: #C6C600;
-  color: black;
-  font-weight: bold;
-  padding: 5px 10px;
-}
-.fixme .content {
-  padding: 5px 10px;
-}
-
-/**
- * Typography
- */
-
-body {
-  font-family: verdana, "Trebuchet MS", arial, helvetica, sans-serif;
-  font-size: 100%;
-}
-
-#content {
-  font-family: Georgia, Palatino, Times, serif;
-  font-size: 95%;
-}
-#tabs {
-  font-size: 70%;
-}
-#menu {
-  font-size: 80%;
-}
-#footer {
-  font-size: 70%;
-}
-
-h1, h2, h3, h4, h5, h6 {
-  font-family: "Trebuchet MS", verdana, arial, helvetica, sans-serif;
-  font-weight: bold;
-  margin-top: 1em;
-  margin-bottom: .5em;
-}
-
-h1 {
-    margin-top: 0;
-    margin-bottom: 1em;
-  font-size: 1.4em;
-  background-color: 73CAFF
-}
-#content h1 {
-  font-size: 160%;
-  margin-bottom: .5em;
-}
-#menu h1 {
-  margin: 0;
-  padding: 10px;
-  background: #336699;
-  color: white;
-}
-h2 { 
-  font-size: 120%;
-  background-color: 73CAFF
-}
-h3 { font-size: 100%; }
-h4 { font-size: 90%; }
-h5 { font-size: 80%; }
-h6 { font-size: 75%; }
-
-p {
-  line-height: 120%;
-  text-align: left;
-  margin-top: .5em;
-  margin-bottom: 1em;
-}
-
-#content li,
-#content th,
-#content td,
-#content li ul,
-#content li ol{
-  margin-top: .5em;
-  margin-bottom: .5em;
-}
-
-
-#content li li,
-#minitoc-area li{
-  margin-top: 0em;
-  margin-bottom: 0em;
-}
-
-#content .attribution {
-  text-align: right;
-  font-style: italic;
-  font-size: 85%;
-  margin-top: 1em;
-}
-
-.codefrag {
-  font-family: "Courier New", Courier, monospace;
-  font-size: 110%;
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee7beda6/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/changes/ChangesSimpleStyle.css
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/changes/ChangesSimpleStyle.css b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/changes/ChangesSimpleStyle.css
deleted file mode 100644
index 407d0f1..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/changes/ChangesSimpleStyle.css
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
-* Licensed to the Apache Software Foundation (ASF) under one or more
-* contributor license agreements.  See the NOTICE file distributed with
-* this work for additional information regarding copyright ownership.
-* The ASF licenses this file to You under the Apache License, Version 2.0
-* (the "License"); you may not use this file except in compliance with
-* the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-body {
-  font-family: Courier New, monospace;
-  font-size: 10pt;
-}
-
-h1 {
-  font-family: Courier New, monospace;
-  font-size: 10pt;
-}
-
-h2 {
-  font-family: Courier New, monospace;
-  font-size: 10pt; 
-}
-
-h3 {
-  font-family: Courier New, monospace;
-  font-size: 10pt; 
-}
-
-a:link {
-  color: blue;
-}
-
-a:visited {
-  color: purple; 
-}
-
-li {
-  margin-top: 1em;
-  margin-bottom: 1em;
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee7beda6/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/changes/changes2html.pl
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/changes/changes2html.pl b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/changes/changes2html.pl
deleted file mode 100755
index 4431e57..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/changes/changes2html.pl
+++ /dev/null
@@ -1,286 +0,0 @@
-#!/usr/bin/perl
-#
-# Transforms Lucene Java's CHANGES.txt into Changes.html
-#
-# Input is on STDIN, output is to STDOUT
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-use strict;
-use warnings;
-
-my $jira_url_prefix = 'http://issues.apache.org/jira/browse/';
-my $title = undef;
-my $release = undef;
-my $sections = undef;
-my $items = undef;
-my $first_relid = undef;
-my $second_relid = undef;
-my @releases = ();
-
-my @lines = <>;                        # Get all input at once
-
-#
-# Parse input and build hierarchical release structure in @releases
-#
-for (my $line_num = 0 ; $line_num <= $#lines ; ++$line_num) {
-  $_ = $lines[$line_num];
-  next unless (/\S/);                  # Skip blank lines
-
-  unless ($title) {
-    if (/\S/) {
-      s/^\s+//;                        # Trim leading whitespace
-      s/\s+$//;                        # Trim trailing whitespace
-    }
-    $title = $_;
-    next;
-  }
-
-  if (/^(Release)|(Trunk)/) {   # Release headings
-    $release = $_;
-    $sections = [];
-    push @releases, [ $release, $sections ];
-    ($first_relid = lc($release)) =~ s/\s+/_/g   if ($#releases == 0);
-    ($second_relid = lc($release)) =~ s/\s+/_/g  if ($#releases == 1);
-    $items = undef;
-    next;
-  }
-
-  # Section heading: 2 leading spaces, words all capitalized
-  if (/^  ([A-Z]+)\s*/) {
-    my $heading = $_;
-    $items = [];
-    push @$sections, [ $heading, $items ];
-    next;
-  }
-
-  # Handle earlier releases without sections - create a headless section
-  unless ($items) {
-    $items = [];
-    push @$sections, [ undef, $items ];
-  }
-
-  my $type;
-  if (@$items) { # A list item has been encountered in this section before
-    $type = $items->[0];  # 0th position of items array is list type
-  } else {
-    $type = get_list_type($_);
-    push @$items, $type;
-  }
-
-  if ($type eq 'numbered') { # The modern items list style
-    # List item boundary is another numbered item or an unindented line
-    my $line;
-    my $item = $_;
-    $item =~ s/^(\s{0,2}\d+\.\s*)//;       # Trim the leading item number
-    my $leading_ws_width = length($1);
-    $item =~ s/\s+$//;                     # Trim trailing whitespace
-    $item .= "\n";
-
-    while ($line_num < $#lines
-           and ($line = $lines[++$line_num]) !~ /^(?:\s{0,2}\d+\.\s*\S|\S)/) {
-      $line =~ s/^\s{$leading_ws_width}//; # Trim leading whitespace
-      $line =~ s/\s+$//;                   # Trim trailing whitespace
-      $item .= "$line\n";
-    }
-    $item =~ s/\n+\Z/\n/;                  # Trim trailing blank lines
-    push @$items, $item;
-    --$line_num unless ($line_num == $#lines);
-  } elsif ($type eq 'paragraph') {         # List item boundary is a blank line
-    my $line;
-    my $item = $_;
-    $item =~ s/^(\s+)//;
-    my $leading_ws_width = defined($1) ? length($1) : 0;
-    $item =~ s/\s+$//;                     # Trim trailing whitespace
-    $item .= "\n";
-
-    while ($line_num < $#lines and ($line = $lines[++$line_num]) =~ /\S/) {
-      $line =~ s/^\s{$leading_ws_width}//; # Trim leading whitespace
-      $line =~ s/\s+$//;                   # Trim trailing whitespace
-      $item .= "$line\n";
-    }
-    push @$items, $item;
-    --$line_num unless ($line_num == $#lines);
-  } else { # $type is one of the bulleted types
-    # List item boundary is another bullet or a blank line
-    my $line;
-    my $item = $_;
-    $item =~ s/^(\s*$type\s*)//;           # Trim the leading bullet
-    my $leading_ws_width = length($1);
-    $item =~ s/\s+$//;                     # Trim trailing whitespace
-    $item .= "\n";
-
-    while ($line_num < $#lines
-           and ($line = $lines[++$line_num]) !~ /^\s*(?:$type|\Z)/) {
-      $line =~ s/^\s{$leading_ws_width}//; # Trim leading whitespace
-      $line =~ s/\s+$//;                   # Trim trailing whitespace
-      $item .= "$line\n";
-    }
-    push @$items, $item;
-    --$line_num unless ($line_num == $#lines);
-  }
-}
-
-#
-# Print HTML-ified version to STDOUT
-#
-print<<"__HTML_HEADER__";
-<!--
-**********************************************************
-** WARNING: This file is generated from CHANGES.txt by the 
-**          Perl script 'changes2html.pl'.
-**          Do *not* edit this file!
-**********************************************************
-          
-****************************************************************************
-* Licensed to the Apache Software Foundation (ASF) under one or more
-* contributor license agreements.  See the NOTICE file distributed with
-* this work for additional information regarding copyright ownership.
-* The ASF licenses this file to You under the Apache License, Version 2.0
-* (the "License"); you may not use this file except in compliance with
-* the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-****************************************************************************
--->
-<html>
-<head>
-  <title>$title</title>
-  <link rel="stylesheet" href="ChangesFancyStyle.css" title="Fancy">
-  <link rel="alternate stylesheet" href="ChangesSimpleStyle.css" title="Simple">
-  <META http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
-  <SCRIPT>
-    function toggleList(e) {
-      element = document.getElementById(e).style;
-      element.display == 'none' ? element.display = 'block' : element.display='none';
-    }
-    function collapse() {
-      for (var i = 0; i < document.getElementsByTagName("ul").length; i++) {
-        var list = document.getElementsByTagName("ul")[i];
-        if (list.id != '$first_relid' && list.id != '$second_relid') {
-          list.style.display = "none";
-        }
-      }
-      for (var i = 0; i < document.getElementsByTagName("ol").length; i++) {
-        document.getElementsByTagName("ol")[i].style.display = "none"; 
-      }
-    }
-    window.onload = collapse;
-  </SCRIPT>
-</head>
-<body>
-
-<a href="http://hadoop.apache.org/core/"><img class="logoImage" alt="Hadoop" src="images/hadoop-logo.jpg" title="Scalable Computing Platform"></a>
-<h1>$title</h1>
-
-__HTML_HEADER__
-
-my $heading;
-my $relcnt = 0;
-my $header = 'h2';
-for my $rel (@releases) {
-  if (++$relcnt == 3) {
-    $header = 'h3';
-    print "<h2><a href=\"javascript:toggleList('older')\">";
-    print "Older Releases";
-    print "</a></h2>\n";
-    print "<ul id=\"older\">\n"
-  }
-      
-  ($release, $sections) = @$rel;
-
-  # The first section heading is undefined for the older sectionless releases
-  my $has_release_sections = $sections->[0][0];
-
-  (my $relid = lc($release)) =~ s/\s+/_/g;
-  print "<$header><a href=\"javascript:toggleList('$relid')\">";
-  print "$release";
-  print "</a></$header>\n";
-  print "<ul id=\"$relid\">\n"
-    if ($has_release_sections);
-
-  for my $section (@$sections) {
-    ($heading, $items) = @$section;
-    (my $sectid = lc($heading)) =~ s/\s+/_/g;
-    my $numItemsStr = $#{$items} > 0 ? "($#{$items})" : "(none)";  
-
-    print "  <li><a href=\"javascript:toggleList('$relid.$sectid')\">",
-          ($heading || ''), "</a>&nbsp;&nbsp;&nbsp;$numItemsStr\n"
-      if ($has_release_sections);
-
-    my $list_type = $items->[0] || '';
-    my $list = ($has_release_sections || $list_type eq 'numbered' ? 'ol' : 'ul');
-    my $listid = $sectid ? "$relid.$sectid" : $relid;
-    print "    <$list id=\"$listid\">\n";
-
-    for my $itemnum (1..$#{$items}) {
-      my $item = $items->[$itemnum];
-      $item =~ s:&:&amp;:g;                            # Escape HTML metachars
-      $item =~ s:<:&lt;:g; 
-      $item =~ s:>:&gt;:g;
-
-      $item =~ s:\s*(\([^)"]+?\))\s*$:<br />$1:;       # Separate attribution
-      $item =~ s:\n{2,}:\n<p/>\n:g;                    # Keep paragraph breaks
-      $item =~ s{(?:${jira_url_prefix})?(HADOOP-\d+)}  # Link to JIRA Common
-                {<a href="${jira_url_prefix}$1">$1</a>}g;
-      $item =~ s{(?:${jira_url_prefix})?(HDFS-\d+)}    # Link to JIRA Hdfs
-                {<a href="${jira_url_prefix}$1">$1</a>}g;
-      $item =~ s{(?:${jira_url_prefix})?(MAPREDUCE-\d+)}  # Link to JIRA MR
-                {<a href="${jira_url_prefix}$1">$1</a>}g;
-      print "      <li>$item</li>\n";
-    }
-    print "    </$list>\n";
-    print "  </li>\n" if ($has_release_sections);
-  }
-  print "</ul>\n" if ($has_release_sections);
-}
-print "</ul>\n" if ($relcnt > 3);
-print "</body>\n</html>\n";
-
-
-#
-# Subroutine: get_list_type
-#
-# Takes one parameter:
-#
-#    - The first line of a sub-section/point
-#
-# Returns one scalar:
-#
-#    - The list type: 'numbered'; or one of the bulleted types '-', or '.' or
-#      'paragraph'.
-#
-sub get_list_type {
-  my $first_list_item_line = shift;
-  my $type = 'paragraph'; # Default to paragraph type
-
-  if ($first_list_item_line =~ /^\s{0,2}\d+\.\s+\S+/) {
-    $type = 'numbered';
-  } elsif ($first_list_item_line =~ /^\s*([-.])\s+\S+/) {
-    $type = $1;
-  }
-  return $type;
-}
-
-1;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee7beda6/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/releasenotes.html
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/releasenotes.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/releasenotes.html
deleted file mode 100644
index 3557e06..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/releasenotes.html
+++ /dev/null
@@ -1 +0,0 @@
-THIS IS A PLACEHOLDER.  REAL RELEASE NOTES WILL BE ADDED TO THIS FILE IN RELEASE BRANCHES.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee7beda6/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/README.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/README.txt b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/README.txt
deleted file mode 100644
index 9bc261b..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/README.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-This is the base documentation directory.
-
-skinconf.xml     # This file customizes Forrest for your project. In it, you
-                 # tell forrest the project name, logo, copyright info, etc
-
-sitemap.xmap     # Optional. This sitemap is consulted before all core sitemaps.
-                 # See http://forrest.apache.org/docs/project-sitemap.html

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee7beda6/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/classes/CatalogManager.properties
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/classes/CatalogManager.properties b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/classes/CatalogManager.properties
deleted file mode 100644
index b9cb584..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/classes/CatalogManager.properties
+++ /dev/null
@@ -1,40 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#=======================================================================
-# CatalogManager.properties
-#
-# This is the default properties file for Apache Forrest.
-# This facilitates local configuration of application-specific catalogs.
-#
-# See the Apache Forrest documentation:
-# http://forrest.apache.org/docs/your-project.html
-# http://forrest.apache.org/docs/validation.html
-
-# verbosity ... level of messages for status/debug
-# See forrest/src/core/context/WEB-INF/cocoon.xconf
-
-# catalogs ... list of additional catalogs to load
-#  (Note that Apache Forrest will automatically load its own default catalog
-#  from src/core/context/resources/schema/catalog.xcat)
-# use full pathnames
-# pathname separator is always semi-colon (;) regardless of operating system
-# directory separator is always slash (/) regardless of operating system
-#
-#catalogs=/home/me/forrest/my-site/src/documentation/resources/schema/catalog.xcat
-catalogs=
-

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee7beda6/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/conf/cli.xconf
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/conf/cli.xconf b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/conf/cli.xconf
deleted file mode 100644
index 5c6e245..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/conf/cli.xconf
+++ /dev/null
@@ -1,327 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<!--+
-    |  This is the Apache Cocoon command line configuration file.
-    |  Here you give the command line interface details of where
-    |  to find various aspects of your Cocoon installation.
-    |
-    |  If you wish, you can also use this file to specify the URIs
-    |  that you wish to generate.
-    |
-    |  The current configuration information in this file is for
-    |  building the Cocoon documentation. Therefore, all links here
-    |  are relative to the build context dir, which, in the build.xml
-    |  file, is set to ${build.context}
-    |
-    |  Options:
-    |    verbose:            increase amount of information presented
-    |                        to standard output (default: false)
-    |    follow-links:       whether linked pages should also be
-    |                        generated (default: true)
-    |    precompile-only:    precompile sitemaps and XSP pages, but
-    |                        do not generate any pages (default: false)
-    |    confirm-extensions: check the mime type for the generated page
-    |                        and adjust filename and links extensions
-    |                        to match the mime type
-    |                        (e.g. text/html->.html)
-    |
-    |  Note: Whilst using an xconf file to configure the Cocoon
-    |        Command Line gives access to more features, the use of
-    |        command line parameters is more stable, as there are
-    |        currently plans to improve the xconf format to allow
-    |        greater flexibility. If you require a stable and
-    |        consistent method for accessing the CLI, it is recommended
-    |        that you use the command line parameters to configure
-    |        the CLI. See documentation at:
-    |        http://cocoon.apache.org/2.1/userdocs/offline/
-    |        http://wiki.apache.org/cocoon/CommandLine
-    |
-    +-->
-
-<cocoon verbose="true"
-        follow-links="true"
-        precompile-only="false"
-        confirm-extensions="false">
-
-   <!--+
-       |  The context directory is usually the webapp directory
-       |  containing the sitemap.xmap file.
-       |
-       |  The config file is the cocoon.xconf file.
-       |
-       |  The work directory is used by Cocoon to store temporary
-       |  files and cache files.
-       |
-       |  The destination directory is where generated pages will
-       |  be written (assuming the 'simple' mapper is used, see
-       |  below)
-       +-->
-   <context-dir>.</context-dir>
-   <config-file>WEB-INF/cocoon.xconf</config-file>
-   <work-dir>../tmp/cocoon-work</work-dir>
-   <dest-dir>../site</dest-dir>
-
-   <!--+
-       |  A checksum file can be used to store checksums for pages
-       |  as they are generated. When the site is next generated,
-       |  files will not be written if their checksum has not changed.
-       |  This means that it will be easier to detect which files
-       |  need to be uploaded to a server, using the timestamp.
-       |
-       |  The default path is relative to the core webapp directory.
-       |  An asolute path can be used.
-       +-->
-   <!--   <checksums-uri>build/work/checksums</checksums-uri>-->
-
-   <!--+
-       | Broken link reporting options:
-       |   Report into a text file, one link per line:
-       |     <broken-links type="text" report="filename"/>
-       |   Report into an XML file:
-       |     <broken-links type="xml" report="filename"/>
-       |   Ignore broken links (default):
-       |     <broken-links type="none"/>
-       |
-       |   Two attributes to this node specify whether a page should
-       |   be generated when an error has occured. 'generate' specifies
-       |   whether a page should be generated (default: true) and
-       |   extension specifies an extension that should be appended
-       |   to the generated page's filename (default: none)
-       |
-       |   Using this, a quick scan through the destination directory
-       |   will show broken links, by their filename extension.
-       +-->
-   <broken-links type="xml"
-                 file="../brokenlinks.xml"
-                 generate="false"
-                 extension=".error"
-                 show-referrers="true"/>
-
-   <!--+
-       |  Load classes at startup. This is necessary for generating
-       |  from sites that use SQL databases and JDBC.
-       |  The <load-class> element can be repeated if multiple classes
-       |  are needed.
-       +-->
-   <!--
-   <load-class>org.firebirdsql.jdbc.Driver</load-class>
-   -->
-
-   <!--+
-       |  Configures logging.
-       |  The 'log-kit' parameter specifies the location of the log kit
-       |  configuration file (usually called logkit.xconf.
-       |
-       |  Logger specifies the logging category (for all logging prior
-       |  to other Cocoon logging categories taking over)
-       |
-       |  Available log levels are:
-       |    DEBUG:        prints all level of log messages.
-       |    INFO:         prints all level of log messages except DEBUG
-       |                  ones.
-       |    WARN:         prints all level of log messages except DEBUG
-       |                  and INFO ones.
-       |    ERROR:        prints all level of log messages except DEBUG,
-       |                  INFO and WARN ones.
-       |    FATAL_ERROR:  prints only log messages of this level
-       +-->
-   <!-- <logging log-kit="WEB-INF/logkit.xconf" logger="cli" level="ERROR" /> -->
-
-   <!--+
-       |  Specifies the filename to be appended to URIs that
-       |  refer to a directory (i.e. end with a forward slash).
-       +-->
-   <default-filename>index.html</default-filename>
-
-   <!--+
-       |  Specifies a user agent string to the sitemap when
-       |  generating the site.
-       |
-       |  A generic term for a web browser is "user agent". Any
-       |  user agent, when connecting to a web server, will provide
-       |  a string to identify itself (e.g. as Internet Explorer or
-       |  Mozilla). It is possible to have Cocoon serve different
-       |  content depending upon the user agent string provided by
-       |  the browser. If your site does this, then you may want to
-       |  use this <user-agent> entry to provide a 'fake' user agent
-       |  to Cocoon, so that it generates the correct version of your
-       |  site.
-       |
-       |  For most sites, this can be ignored.
-       +-->
-   <!--
-   <user-agent>Cocoon Command Line Environment 2.1</user-agent>
-   -->
-
-   <!--+
-       |  Specifies an accept string to the sitemap when generating
-       |  the site.
-       |  User agents can specify to an HTTP server what types of content
-       |  (by mime-type) they are able to receive. E.g. a browser may be
-       |  able to handle jpegs, but not pngs. The HTTP accept header
-       |  allows the server to take the browser's capabilities into account,
-       |  and only send back content that it can handle.
-       |
-       |  For most sites, this can be ignored.
-       +-->
-
-   <accept>*/*</accept>
-
-   <!--+
-       | Specifies which URIs should be included or excluded, according
-       | to wildcard patterns.
-       |
-       | These includes/excludes are only relevant when you are following
-       | links. A link URI must match an include pattern (if one is given)
-       | and not match an exclude pattern, if it is to be followed by
-       | Cocoon. It can be useful, for example, where there are links in
-       | your site to pages that are not generated by Cocoon, such as
-       | references to api-documentation.
-       |
-       | By default, all URIs are included. If both include and exclude
-       | patterns are specified, a URI is first checked against the
-       | include patterns, and then against the exclude patterns.
-       |
-       | Multiple patterns can be given, using muliple include or exclude
-       | nodes.
-       |
-       | The order of the elements is not significant, as only the first
-       | successful match of each category is used.
-       |
-       | Currently, only the complete source URI can be matched (including
-       | any URI prefix). Future plans include destination URI matching
-       | and regexp matching. If you have requirements for these, contact
-       | dev@cocoon.apache.org.
-       +-->
-
-   <exclude pattern="**/"/>
-   <exclude pattern="api/**"/>
-   <exclude pattern="jdiff/**"/>
-   <exclude pattern="changes.html"/>
-   <exclude pattern="releasenotes.html"/>
-
-<!--
-  This is a workaround for FOR-284 "link rewriting broken when
-  linking to xml source views which contain site: links".
-  See the explanation there and in declare-broken-site-links.xsl
--->
-   <exclude pattern="site:**"/>
-   <exclude pattern="ext:**"/>
-   <exclude pattern="lm:**"/>
-   <exclude pattern="**/site:**"/>
-   <exclude pattern="**/ext:**"/>
-   <exclude pattern="**/lm:**"/>
-
-   <!-- Exclude tokens used in URLs to ASF mirrors (interpreted by a CGI) -->
-   <exclude pattern="[preferred]/**"/>
-   <exclude pattern="[location]"/>
-
-   <!--   <include-links extension=".html"/>-->
-
-   <!--+
-       |  <uri> nodes specify the URIs that should be generated, and
-       |  where required, what should be done with the generated pages.
-       |  They describe the way the URI of the generated file is created
-       |  from the source page's URI. There are three ways that a generated
-       |  file URI can be created: append, replace and insert.
-       |
-       |  The "type" attribute specifies one of (append|replace|insert):
-       |
-       |  append:
-       |  Append the generated page's URI to the end of the source URI:
-       |
-       |   <uri type="append" src-prefix="documents/" src="index.html"
-       |   dest="build/dest/"/>
-       |
-       |  This means that
-       |   (1) the "documents/index.html" page is generated
-       |   (2) the file will be written to "build/dest/documents/index.html"
-       |
-       |  replace:
-       |  Completely ignore the generated page's URI - just
-       |  use the destination URI:
-       |
-       |   <uri type="replace" src-prefix="documents/" src="index.html"
-       |   dest="build/dest/docs.html"/>
-       |
-       |  This means that
-       |   (1) the "documents/index.html" page is generated
-       |   (2) the result is written to "build/dest/docs.html"
-       |   (3) this works only for "single" pages - and not when links
-       |       are followed
-       |
-       |  insert:
-       |  Insert generated page's URI into the destination
-       |  URI at the point marked with a * (example uses fictional
-       |  zip protocol)
-       |
-       |   <uri type="insert" src-prefix="documents/" src="index.html"
-       |   dest="zip://*.zip/page.html"/>
-       |
-       |  This means that
-       |   (1)
-       |
-       |  In any of these scenarios, if the dest attribute is omitted,
-       |  the value provided globally using the <dest-dir> node will
-       |  be used instead.
-       +-->
-   <!--
-   <uri type="replace"
-        src-prefix="samples/"
-        src="hello-world/hello.html"
-        dest="build/dest/hello-world.html"/>
-   -->
-
-   <!--+
-       | <uri> nodes can be grouped together in a <uris> node. This
-       | enables a group of URIs to share properties. The following
-       | properties can be set for a group of URIs:
-       |   * follow-links:       should pages be crawled for links
-       |   * confirm-extensions: should file extensions be checked
-       |                         for the correct mime type
-       |   * src-prefix:         all source URIs should be
-       |                         pre-pended with this prefix before
-       |                         generation. The prefix is not
-       |                         included when calculating the
-       |                         destination URI
-       |   * dest:               the base destination URI to be
-       |                         shared by all pages in this group
-       |   * type:               the method to be used to calculate
-       |                         the destination URI. See above
-       |                         section on <uri> node for details.
-       |
-       | Each <uris> node can have a name attribute. When a name
-       | attribute has been specified, the -n switch on the command
-       | line can be used to tell Cocoon to only process the URIs
-       | within this URI group. When no -n switch is given, all
-       | <uris> nodes are processed. Thus, one xconf file can be
-       | used to manage multiple sites.
-       +-->
-   <!--
-   <uris name="mirrors" follow-links="false">
-     <uri type="append" src="mirrors.html"/>
-   </uris>
-   -->
-
-   <!--+
-       |  File containing URIs (plain text, one per line).
-       +-->
-   <!--
-   <uri-file>uris.txt</uri-file>
-   -->
-</cocoon>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee7beda6/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/index.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/index.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/index.xml
deleted file mode 100644
index ff516c5..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/index.xml
+++ /dev/null
@@ -1,46 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<!DOCTYPE document PUBLIC "-//APACHE//DTD Documentation V2.0//EN" "http://forrest.apache.org/dtd/document-v20.dtd">
-
-<document>
-  <header>
-    <title>Overview</title>
-  </header>
-  <body>
-  
-  <p>
-  The HDFS Documentation provides the information you need to get started using the Hadoop Distributed File System. 
-  Begin with the <a href="hdfs_user_guide.html">HDFS Users Guide</a> to obtain an overview of the system and then
-  move on to the <a href="hdfs_design.html">HDFS Architecture Guide</a> for more detailed information.
-  </p>
-  
-  <p>
-   HDFS commonly works in tandem with a cluster environment and MapReduce applications. 
-   For information about Hadoop clusters (single or multi node) see the 
- <a href="http://hadoop.apache.org/common/docs/current/index.html">Hadoop Common Documentation</a>.
-   For information about MapReduce see the 
- <a href="http://hadoop.apache.org/mapreduce/docs/current/index.html">MapReduce Documentation</a>.
-  </p>   
-  
-<p>
-If you have more questions, you can ask on the <a href="ext:lists">HDFS Mailing Lists</a> or browse the <a href="ext:archive">Mailing List Archives</a>.
-</p>
-
-</body>
-</document>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee7beda6/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/site.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/site.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/site.xml
deleted file mode 100644
index ffb3219..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/site.xml
+++ /dev/null
@@ -1,289 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<!--
-Forrest site.xml
-
-This file contains an outline of the site's information content.  It is used to:
-- Generate the website menus (though these can be overridden - see docs)
-- Provide semantic, location-independent aliases for internal 'site:' URIs, eg
-<link href="site:changes"> links to changes.html (or ../changes.html if in
-  subdir).
-- Provide aliases for external URLs in the external-refs section.  Eg, <link
-  href="ext:cocoon"> links to http://xml.apache.org/cocoon/
-
-See http://forrest.apache.org/docs/linking.html for more info.
--->
-
-<site label="Hadoop" href="" xmlns="http://apache.org/forrest/linkmap/1.0">
-  
-   <docs label="Getting Started"> 
-
-     <hdfs_user  label="HDFS Users "  href="hdfs_user_guide.html" />
-     <hdfs_arch  label="HDFS Architecture" href="hdfs_design.html" />	
-   </docs>
-   <docs label="Guides">
-      <hdfs_perm      		label="Permissions" href="hdfs_permissions_guide.html" />
-      <hdfs_quotas     	    label="Quotas" href="hdfs_quota_admin_guide.html" />
-      <hdfs_SLG        		label="Synthetic Load Generator"  href="SLG_user_guide.html" />
-      <hdfs_imageviewer	label="Offline Image Viewer"	href="hdfs_imageviewer.html" />
-      <hdfs_editsviewer	label="Offline Edits Viewer"	href="hdfs_editsviewer.html" />
-      <webhdfs label="WebHDFS REST API" href="webhdfs.html" />
-      <hftp 			    label="HFTP" href="hftp.html"/>
-      <faultinject_framework label="Fault Injection"  href="faultinject_framework.html" /> 
-      <hdfs_libhdfs   		label="C API libhdfs" href="libhdfs.html" /> 
-    </docs>
-   
-   <docs label="Miscellaneous"> 
-		<api       	label="API Docs"           href="ext:api/index" />
-		<jdiff     	label="API Changes"      href="ext:jdiff/changes" />
-		<wiki      	label="Wiki"               	href="ext:wiki" />
-		<faq       	label="FAQ"                	href="ext:faq" />
-		<relnotes  label="Release Notes" 	href="ext:relnotes" />
-		<changes	label="Change Log"       href="ext:changes" />
-   </docs> 
-   
-  <external-refs>
-    <site      href="http://hadoop.apache.org/hdfs/"/>
-    <lists     href="http://hadoop.apache.org/hdfs/mailing_lists.html"/>
-    <archive   href="http://mail-archives.apache.org/mod_mbox/hadoop-hdfs-commits/"/>
-    <releases  href="http://hadoop.apache.org/hdfs/releases.html">
-              <download href="#Download" />
-    </releases>
-    <jira      href="http://hadoop.apache.org/hdfs/issue_tracking.html"/>
-    <wiki      href="http://wiki.apache.org/hadoop/HDFS" />
-    <faq       href="http://wiki.apache.org/hadoop/HDFS/FAQ" />
-    
-    <common-default href="http://hadoop.apache.org/common/docs/current/common-default.html" />
-    <hdfs-default href="http://hadoop.apache.org/hdfs/docs/current/hdfs-default.html" />
-    <mapred-default href="http://hadoop.apache.org/mapreduce/docs/current/mapred-default.html" />
-    
-    <zlib      href="http://www.zlib.net/" />
-    <gzip      href="http://www.gzip.org/" />
-    <bzip      href="http://www.bzip.org/" />
-    <osx       href="http://www.apple.com/macosx" />
-    <hod href="">
-      <cluster-resources href="http://www.clusterresources.com" />
-      <torque href="http://www.clusterresources.com/pages/products/torque-resource-manager.php" />
-      <torque-download href="http://www.clusterresources.com/downloads/torque/" />
-      <torque-docs href="http://www.clusterresources.com/pages/resources/documentation.php" />
-      <torque-wiki href="http://www.clusterresources.com/wiki/doku.php?id=torque:torque_wiki" />
-      <torque-mailing-list href="http://www.clusterresources.com/pages/resources/mailing-lists.php" />
-      <torque-basic-config href="http://www.clusterresources.com/wiki/doku.php?id=torque:1.2_basic_configuration" />
-      <torque-advanced-config href="http://www.clusterresources.com/wiki/doku.php?id=torque:1.3_advanced_configuration" />
-      <maui href="http://www.clusterresources.com/pages/products/maui-cluster-scheduler.php"/>
-      <python href="http://www.python.org" />
-      <twisted-python href="http://twistedmatrix.com/trac/" />
-    </hod>
-    <relnotes href="releasenotes.html" />
-    <changes href="changes.html" />
-    <jdiff href="jdiff/">
-      <changes href="changes.html" />
-    </jdiff>
-    <api href="api/">
-      <index href="index.html" />
-      <org href="org/">
-        <apache href="apache/">
-          <hadoop href="hadoop/">
-            <conf href="conf/">
-              <configuration href="Configuration.html">
-                <final_parameters href="#FinalParams" />
-                <get href="#get(java.lang.String, java.lang.String)" />
-                <set href="#set(java.lang.String, java.lang.String)" />
-              </configuration>
-            </conf>
-            <filecache href="filecache/">
-              <distributedcache href="DistributedCache.html">
-                <addarchivetoclasspath href="#addArchiveToClassPath(org.apache.hadoop.fs.Path,%20org.apache.hadoop.conf.Configuration)" />
-                <addfiletoclasspath href="#addFileToClassPath(org.apache.hadoop.fs.Path,%20org.apache.hadoop.conf.Configuration)" />
-                <addcachefile href="#addCacheFile(java.net.URI,%20org.apache.hadoop.conf.Configuration)" />
-                <addcachearchive href="#addCacheArchive(java.net.URI,%20org.apache.hadoop.conf.Configuration)" />
-                <setcachefiles href="#setCacheFiles(java.net.URI[],%20org.apache.hadoop.conf.Configuration)" />
-                <setcachearchives href="#setCacheArchives(java.net.URI[],%20org.apache.hadoop.conf.Configuration)" />
-                <createsymlink href="#createSymlink(org.apache.hadoop.conf.Configuration)" />
-              </distributedcache>  
-            </filecache>
-            <fs href="fs/">
-              <FileStatus href="FileStatus.html" />
-              <Path href="Path.html" />
-
-              <filesystem href="FileSystem.html">
-                <open href="#open(org.apache.hadoop.fs.Path,%20int)" />
-                <getFileStatus href="#getFileStatus(org.apache.hadoop.fs.Path)" />
-                <listStatus href="#listStatus(org.apache.hadoop.fs.Path)" />
-                <getContentSummary href="#getContentSummary(org.apache.hadoop.fs.Path)" />
-                <getFileChecksum href="#getFileChecksum(org.apache.hadoop.fs.Path)" />
-                <getHomeDirectory href="#getHomeDirectory()" />
-                <getDelegationToken href="#getDelegationToken(org.apache.hadoop.io.Text)" />
-
-                <create href="#create(org.apache.hadoop.fs.Path,%20org.apache.hadoop.fs.permission.FsPermission,%20boolean,%20int,%20short,%20long,%20org.apache.hadoop.util.Progressable)" />
-                <mkdirs href="#mkdirs(org.apache.hadoop.fs.Path,%20org.apache.hadoop.fs.permission.FsPermission)" />
-                <rename href="#rename(org.apache.hadoop.fs.Path,%20org.apache.hadoop.fs.Path,%20org.apache.hadoop.fs.Options.Rename...)" />
-                <setReplication href="#setReplication(org.apache.hadoop.fs.Path,%20short)" />
-                <setOwner href="#setOwner(org.apache.hadoop.fs.Path,%20java.lang.String,%20java.lang.String)" />
-                <setPermission href="#setPermission(org.apache.hadoop.fs.Path,%20org.apache.hadoop.fs.permission.FsPermission)" />
-                <setTimes href="#setTimes(org.apache.hadoop.fs.Path,%20long,%20long)" />
-
-                <append href="#append(org.apache.hadoop.fs.Path,%20int,%20org.apache.hadoop.util.Progressable)" />
-                <delete href="#delete(org.apache.hadoop.fs.Path,%20boolean)" />
-              </filesystem>
-            </fs>
-
-
-
-            <io href="io/">
-              <closeable href="Closeable.html">
-                <close href="#close()" />
-              </closeable>
-              <sequencefile href="SequenceFile.html" />
-              <sequencefilecompressiontype href="SequenceFile.CompressionType.html">
-                <none href="#NONE" />
-                <record href="#RECORD" />
-                <block href="#BLOCK" />
-              </sequencefilecompressiontype>
-              <writable href="Writable.html" />
-              <writablecomparable href="WritableComparable.html" />
-              <compress href="compress/">
-                <compressioncodec href="CompressionCodec.html" />
-              </compress>
-            </io>
-            <mapred href="mapred/">
-              <clusterstatus href="ClusterStatus.html" />
-              <counters href="Counters.html" />
-              <fileinputformat href="FileInputFormat.html">
-                 <setinputpaths href="#setInputPaths(org.apache.hadoop.mapred.JobConf,%20org.apache.hadoop.fs.Path[])" />
-                 <addinputpath href="#addInputPath(org.apache.hadoop.mapred.JobConf,%20org.apache.hadoop.fs.Path)" />
-                 <setinputpathstring href="#setInputPaths(org.apache.hadoop.mapred.JobConf,%20java.lang.String)" />
-                 <addinputpathstring href="#addInputPath(org.apache.hadoop.mapred.JobConf,%20java.lang.String)" />
-              </fileinputformat>
-              <fileoutputformat href="FileOutputFormat.html">
-                <getoutputpath href="#getOutputPath(org.apache.hadoop.mapred.JobConf)" />
-                <getworkoutputpath href="#getWorkOutputPath(org.apache.hadoop.mapred.JobConf)" />
-                <setoutputpath href="#setOutputPath(org.apache.hadoop.mapred.JobConf,%20org.apache.hadoop.fs.Path)" />
-                <setcompressoutput href="#setCompressOutput(org.apache.hadoop.mapred.JobConf,%20boolean)" />
-                <setoutputcompressorclass href="#setOutputCompressorClass(org.apache.hadoop.mapred.JobConf,%20java.lang.Class)" />
-              </fileoutputformat>
-              <filesplit href="FileSplit.html" />
-              <inputformat href="InputFormat.html" />
-              <inputsplit href="InputSplit.html" />
-              <isolationrunner href="IsolationRunner.html" />
-              <jobclient href="JobClient.html">
-                <runjob href="#runJob(org.apache.hadoop.mapred.JobConf)" />
-                <submitjob href="#submitJob(org.apache.hadoop.mapred.JobConf)" />
-              </jobclient>
-              <jobconf href="JobConf.html">
-                <setnummaptasks href="#setNumMapTasks(int)" />
-                <setnumreducetasks href="#setNumReduceTasks(int)" />
-                <setoutputkeycomparatorclass href="#setOutputKeyComparatorClass(java.lang.Class)" />
-                <setoutputvaluegroupingcomparator href="#setOutputValueGroupingComparator(java.lang.Class)" />
-                <setcombinerclass href="#setCombinerClass(java.lang.Class)" />
-                <setmapdebugscript href="#setMapDebugScript(java.lang.String)" />
-                <setreducedebugscript href="#setReduceDebugScript(java.lang.String)" />
-                <setmapspeculativeexecution href="#setMapSpeculativeExecution(boolean)" />
-                <setreducespeculativeexecution href="#setReduceSpeculativeExecution(boolean)" />
-                <setmaxmapattempts href="#setMaxMapAttempts(int)" />
-                <setmaxreduceattempts href="#setMaxReduceAttempts(int)" />
-                <setmaxmaptaskfailurespercent href="#setMaxMapTaskFailuresPercent(int)" />
-                <setmaxreducetaskfailurespercent href="#setMaxReduceTaskFailuresPercent(int)" />
-                <setjobendnotificationuri href="#setJobEndNotificationURI(java.lang.String)" />
-                <setcompressmapoutput href="#setCompressMapOutput(boolean)" />
-                <setmapoutputcompressorclass href="#setMapOutputCompressorClass(java.lang.Class)" />
-                <setprofileenabled href="#setProfileEnabled(boolean)" />
-                <setprofiletaskrange href="#setProfileTaskRange(boolean,%20java.lang.String)" />
-                <setprofileparams href="#setProfileParams(java.lang.String)" />
-                <setnumtaskstoexecuteperjvm href="#setNumTasksToExecutePerJvm(int)" />
-                <setqueuename href="#setQueueName(java.lang.String)" />
-                <getjoblocaldir href="#getJobLocalDir()" />
-                <getjar href="#getJar()" />
-              </jobconf>
-              <jobconfigurable href="JobConfigurable.html">
-                <configure href="#configure(org.apache.hadoop.mapred.JobConf)" />
-              </jobconfigurable>
-              <jobcontrol href="jobcontrol/">
-                <package-summary href="package-summary.html" />
-              </jobcontrol>
-              <mapper href="Mapper.html">
-                <map href="#map(K1, V1, org.apache.hadoop.mapred.OutputCollector, org.apache.hadoop.mapred.Reporter)" />
-              </mapper>
-              <outputcollector href="OutputCollector.html">
-                <collect href="#collect(K, V)" />
-              </outputcollector>
-              <outputcommitter href="OutputCommitter.html" />
-              <outputformat href="OutputFormat.html" />
-              <outputlogfilter href="OutputLogFilter.html" />
-              <sequencefileoutputformat href="SequenceFileOutputFormat.html">
-                <setoutputcompressiontype href="#setOutputCompressionType(org.apache.hadoop.mapred.JobConf,%20org.apache.hadoop.io.SequenceFile.CompressionType)" />
-              </sequencefileoutputformat>
-              <partitioner href="Partitioner.html" />
-              <recordreader href="RecordReader.html" />
-              <recordwriter href="RecordWriter.html" />
-              <reducer href="Reducer.html">
-                <reduce href="#reduce(K2, java.util.Iterator, org.apache.hadoop.mapred.OutputCollector, org.apache.hadoop.mapred.Reporter)" />
-              </reducer>
-              <reporter href="Reporter.html">
-                <incrcounterEnum href="#incrCounter(java.lang.Enum, long)" />
-                <incrcounterString href="#incrCounter(java.lang.String, java.lang.String, long amount)" />
-              </reporter>
-              <runningjob href="RunningJob.html" />
-              <skipbadrecords href="SkipBadRecords.html">
-                <setmappermaxskiprecords href="#setMapperMaxSkipRecords(org.apache.hadoop.conf.Configuration, long)"/>
-                <setreducermaxskipgroups href="#setReducerMaxSkipGroups(org.apache.hadoop.conf.Configuration, long)"/>
-                <setattemptsTostartskipping href="#setAttemptsToStartSkipping(org.apache.hadoop.conf.Configuration, int)"/>
-                <setskipoutputpath href="#setSkipOutputPath(org.apache.hadoop.mapred.JobConf, org.apache.hadoop.fs.Path)"/>
-                <counter_map_processed_records href="#COUNTER_MAP_PROCESSED_RECORDS"/>
-                <counter_reduce_processed_groups href="#COUNTER_REDUCE_PROCESSED_GROUPS"/>
-              </skipbadrecords>
-              <textinputformat href="TextInputFormat.html" />
-              <textoutputformat href="TextOutputFormat.html" />
-              <lib href="lib/">
-                <package-summary href="package-summary.html" />
-                <hashpartitioner href="HashPartitioner.html" />
-                <keyfieldbasedpartitioner href="KeyFieldBasedPartitioner.html" />
-                <keyfieldbasedcomparator href="KeyFieldBasedComparator.html" />
-                <lazyoutputformat href="LazyOutputFormat.html" />
-                <aggregate href="aggregate/">
-                  <package-summary href="package-summary.html" />
-                </aggregate>
-              </lib>
-              <pipes href="pipes/">
-                <package-summary href="package-summary.html" />
-              </pipes>
-            </mapred>
-            <net href="net/">
-              <dnstoswitchmapping href="DNSToSwitchMapping.html">
-              <resolve href="#resolve(java.util.List)" />
-              </dnstoswitchmapping>
-            </net>
-            <streaming href="streaming/">
-              <package-summary href="package-summary.html" />
-            </streaming>
-            <util href="util/">
-              <genericoptionsparser href="GenericOptionsParser.html" />
-              <progress href="Progress.html" />
-              <tool href="Tool.html" />
-              <toolrunner href="ToolRunner.html">
-                <run href="#run(org.apache.hadoop.util.Tool, java.lang.String[])" />
-              </toolrunner>
-            </util>
-          </hadoop>
-        </apache>
-      </org>
-    </api>
-  </external-refs>
- 
-</site>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee7beda6/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/tabs.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/tabs.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/tabs.xml
deleted file mode 100644
index 8348749..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/tabs.xml
+++ /dev/null
@@ -1,37 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<!DOCTYPE tabs PUBLIC "-//APACHE//DTD Cocoon Documentation Tab V1.0//EN" 
-          "http://forrest.apache.org/dtd/tab-cocoon-v10.dtd">
-
-<tabs software="Hadoop"
-      title="Hadoop"
-      copyright="The Apache Software Foundation"
-      xmlns:xlink="http://www.w3.org/1999/xlink">
-
-  <!-- The rules are:
-    @dir will always have /index.html added.
-    @href is not modified unless it is root-relative and obviously specifies a
-    directory (ends in '/'), in which case /index.html will be added
-  -->
-
-  <tab label="Project" href="http://hadoop.apache.org/hdfs/" />
-  <tab label="Wiki" href="http://wiki.apache.org/hadoop/HDFS" />
-  <tab label="HDFS 0.24 Documentation" dir="" />  
-  
-</tabs>


[50/50] [abbrv] hadoop git commit: YARN-3609. Load node labels from storage inside RM serviceStart. Contributed by Wangda Tan

Posted by ji...@apache.org.
YARN-3609. Load node labels from storage inside RM serviceStart. Contributed by Wangda Tan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8966d421
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8966d421
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8966d421

Branch: refs/heads/HDFS-7240
Commit: 8966d4217969eb71767ba83a3ff2b5bb38189b19
Parents: 5774f6b
Author: Jian He <ji...@apache.org>
Authored: Wed May 20 16:26:02 2015 -0700
Committer: Jian He <ji...@apache.org>
Committed: Wed May 20 16:30:07 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 +
 .../nodelabels/CommonNodeLabelsManager.java     |  8 +-
 .../TestFileSystemNodeLabelsStore.java          |  4 +
 .../yarn/server/resourcemanager/MockRM.java     | 26 ++++--
 .../server/resourcemanager/RMHATestBase.java    |  4 +-
 .../resourcemanager/TestRMHAForNodeLabels.java  | 93 ++++++++++++++++++++
 6 files changed, 125 insertions(+), 13 deletions(-)
----------------------------------------------------------------------
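
The patch below moves initNodeLabelStore() out of serviceInit() and into serviceStart() in
CommonNodeLabelsManager, so the label store is only read once the service actually starts;
the store tests gain matching mgr.start() calls, MockRM grows a flag for using the real
RMNodeLabelsManager, and the new TestRMHAForNodeLabels asserts that labels added on rm1 are
still visible on rm2 after explicitFailover(). For readers less familiar with the YARN
service lifecycle, a minimal sketch of the init/start split, built on
org.apache.hadoop.service.AbstractService; loadFromStore() and the field names here are
illustrative stand-ins, not code from the patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.service.AbstractService;

    /** Sketch: keep serviceInit() side-effect free; defer store I/O to serviceStart(). */
    public class LabelStoreService extends AbstractService {
      private boolean labelsEnabled;

      public LabelStoreService() {
        super("LabelStoreService");
      }

      @Override
      protected void serviceInit(Configuration conf) throws Exception {
        // Read configuration only; do not touch the backing store yet.
        labelsEnabled = conf.getBoolean("yarn.node-labels.enabled", false);
        super.serviceInit(conf);
      }

      @Override
      protected void serviceStart() throws Exception {
        // Store recovery happens at start time, mirroring the CommonNodeLabelsManager change.
        if (labelsEnabled) {
          loadFromStore(getConfig());   // illustrative stand-in for initNodeLabelStore()
        }
        super.serviceStart();
      }

      private void loadFromStore(Configuration conf) {
        // Hypothetical: replay the persisted label mirror/edit log into memory.
      }
    }

Deferring the store read to start time is exactly what the new HA test exercises: after
explicitFailover(), rm2's label manager has started against the same store directory and
reports the labels rm1 wrote.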


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8966d421/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index dd2d8f4..dfbc06e 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -529,6 +529,9 @@ Release 2.7.1 - UNRELEASED
     YARN-3681. yarn cmd says "could not find main class 'queue'" in windows.
     (Craig Welch and Varun Saxena via xgong)
 
+    YARN-3609. Load node labels from storage inside RM serviceStart. (Wangda
+    Tan via jianhe)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8966d421/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
index badf4d6..2d57ad6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
@@ -222,10 +222,6 @@ public class CommonNodeLabelsManager extends AbstractService {
 
     isDistributedNodeLabelConfiguration  =
         YarnConfiguration.isDistributedNodeLabelConfiguration(conf);
-
-    if (nodeLabelsEnabled) {
-      initNodeLabelStore(conf);
-    }
     
     labelCollections.put(NO_LABEL, new RMNodeLabel(NO_LABEL));
   }
@@ -245,6 +241,10 @@ public class CommonNodeLabelsManager extends AbstractService {
 
   @Override
   protected void serviceStart() throws Exception {
+    if (nodeLabelsEnabled) {
+      initNodeLabelStore(getConfig());
+    }
+    
     // init dispatcher only when service start, because recover will happen in
     // service init, we don't want to trigger any event handling at that time.
     initDispatcher(getConfig());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8966d421/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java
index fb60cd6..4b052c9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java
@@ -108,6 +108,7 @@ public class TestFileSystemNodeLabelsStore extends NodeLabelTestBase {
 
     mgr = new MockNodeLabelManager();
     mgr.init(conf);
+    mgr.start();
 
     // check variables
     Assert.assertEquals(3, mgr.getClusterNodeLabelNames().size());
@@ -127,6 +128,7 @@ public class TestFileSystemNodeLabelsStore extends NodeLabelTestBase {
     mgr.stop();
     mgr = new MockNodeLabelManager();
     mgr.init(conf);
+    mgr.start();
 
     // check variables
     Assert.assertEquals(3, mgr.getClusterNodeLabelNames().size());
@@ -165,6 +167,7 @@ public class TestFileSystemNodeLabelsStore extends NodeLabelTestBase {
     cf.set(YarnConfiguration.NODELABEL_CONFIGURATION_TYPE,
         YarnConfiguration.DISTRIBUTED_NODELABEL_CONFIGURATION_TYPE);
     mgr.init(cf);
+    mgr.start();
 
     // check variables
     Assert.assertEquals(3, mgr.getClusterNodeLabels().size());
@@ -205,6 +208,7 @@ public class TestFileSystemNodeLabelsStore extends NodeLabelTestBase {
 
     mgr = new MockNodeLabelManager();
     mgr.init(conf);
+    mgr.start();
 
     // check variables
     Assert.assertEquals(3, mgr.getClusterNodeLabelNames().size());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8966d421/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
index 3469b0c..672ce13 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
@@ -65,7 +65,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabels
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
@@ -98,6 +97,8 @@ public class MockRM extends ResourceManager {
 
   static final Logger LOG = Logger.getLogger(MockRM.class);
   static final String ENABLE_WEBAPP = "mockrm.webapp.enabled";
+  
+  final private boolean useNullRMNodeLabelsManager;
 
   public MockRM() {
     this(new YarnConfiguration());
@@ -108,20 +109,31 @@ public class MockRM extends ResourceManager {
   }
   
   public MockRM(Configuration conf, RMStateStore store) {
-    super();    
+    this(conf, store, true);
+  }
+  
+  public MockRM(Configuration conf, RMStateStore store,
+      boolean useNullRMNodeLabelsManager) {
+    super();
+    this.useNullRMNodeLabelsManager = useNullRMNodeLabelsManager;
     init(conf instanceof YarnConfiguration ? conf : new YarnConfiguration(conf));
     if(store != null) {
       setRMStateStore(store);
     }
     Logger rootLogger = LogManager.getRootLogger();
-    rootLogger.setLevel(Level.DEBUG);    
+    rootLogger.setLevel(Level.DEBUG);
   }
   
   @Override
-  protected RMNodeLabelsManager createNodeLabelManager() {
-    RMNodeLabelsManager mgr = new NullRMNodeLabelsManager();
-    mgr.init(getConfig());
-    return mgr;
+  protected RMNodeLabelsManager createNodeLabelManager()
+      throws InstantiationException, IllegalAccessException {
+    if (useNullRMNodeLabelsManager) {
+      RMNodeLabelsManager mgr = new NullRMNodeLabelsManager();
+      mgr.init(getConfig());
+      return mgr;
+    } else {
+      return super.createNodeLabelManager();
+    }
   }
 
   public void waitForState(ApplicationId appId, RMAppState finalState)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8966d421/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/RMHATestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/RMHATestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/RMHATestBase.java
index 9f54de8..6f3666f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/RMHATestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/RMHATestBase.java
@@ -106,8 +106,8 @@ public class RMHATestBase extends ClientBaseWithFixes{
   }
 
   protected void startRMs() throws IOException {
-    rm1 = new MockRM(confForRM1);
-    rm2 = new MockRM(confForRM2);
+    rm1 = new MockRM(confForRM1, null, false);
+    rm2 = new MockRM(confForRM2, null, false);
     startRMs(rm1, confForRM1, rm2, confForRM2);
 
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8966d421/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHAForNodeLabels.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHAForNodeLabels.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHAForNodeLabels.java
new file mode 100644
index 0000000..25d9c56
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHAForNodeLabels.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import java.io.File;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.google.common.collect.ImmutableSet;
+
+public class TestRMHAForNodeLabels extends RMHATestBase {
+  public static final Log LOG = LogFactory
+      .getLog(TestSubmitApplicationWithRMHA.class);
+
+  @Before
+  @Override
+  public void setup() throws Exception {
+    super.setup();
+    
+    // Create directory for node label store 
+    File tempDir = File.createTempFile("nlb", ".tmp");
+    tempDir.delete();
+    tempDir.mkdirs();
+    tempDir.deleteOnExit();
+    
+    confForRM1.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
+    confForRM1.set(YarnConfiguration.FS_NODE_LABELS_STORE_ROOT_DIR,
+        tempDir.getAbsolutePath());
+    
+    confForRM2.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
+    confForRM2.set(YarnConfiguration.FS_NODE_LABELS_STORE_ROOT_DIR,
+        tempDir.getAbsolutePath());
+  }
+  
+  @Test
+  public void testRMHARecoverNodeLabels() throws Exception {
+    // start two RMs, and transit rm1 to active, rm2 to standby
+    startRMs();
+    
+    // Add labels to rm1
+    rm1.getRMContext()
+        .getNodeLabelManager()
+        .addToCluserNodeLabels(
+            Arrays.asList(NodeLabel.newInstance("a"),
+                NodeLabel.newInstance("b"), NodeLabel.newInstance("c")));
+   
+    Map<NodeId, Set<String>> nodeToLabels = new HashMap<>();
+    nodeToLabels.put(NodeId.newInstance("host1", 0), ImmutableSet.of("a"));
+    nodeToLabels.put(NodeId.newInstance("host2", 0), ImmutableSet.of("b"));
+    
+    rm1.getRMContext().getNodeLabelManager().replaceLabelsOnNode(nodeToLabels);
+
+    // Do the failover
+    explicitFailover();
+
+    // Check labels in rm2
+    Assert
+        .assertTrue(rm2.getRMContext().getNodeLabelManager()
+            .getClusterNodeLabelNames()
+            .containsAll(ImmutableSet.of("a", "b", "c")));
+    Assert.assertTrue(rm2.getRMContext().getNodeLabelManager()
+        .getNodeLabels().get(NodeId.newInstance("host1", 0)).contains("a"));
+    Assert.assertTrue(rm2.getRMContext().getNodeLabelManager()
+        .getNodeLabels().get(NodeId.newInstance("host2", 0)).contains("b"));
+  }
+}


[39/50] [abbrv] hadoop git commit: HADOOP-11963. Metrics documentation for FSNamesystem misspells PendingDataNodeMessageCount. Contributed by Anu Engineer.

Posted by ji...@apache.org.
HADOOP-11963. Metrics documentation for FSNamesystem misspells PendingDataNodeMessageCount. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e422e76f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e422e76f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e422e76f

Branch: refs/heads/HDFS-7240
Commit: e422e76fcaaa04fc22384d978a2abae967d801b6
Parents: fd3cb53
Author: cnauroth <cn...@apache.org>
Authored: Tue May 19 11:50:27 2015 -0700
Committer: cnauroth <cn...@apache.org>
Committed: Tue May 19 11:50:27 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                  | 3 +++
 hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e422e76f/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index e4537a3..4621f80 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -716,6 +716,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-11581. Multithreaded correctness Warnings
     #org.apache.hadoop.fs.shell.Ls (Brahma Reddy Battula via aw)
 
+    HADOOP-11963. Metrics documentation for FSNamesystem misspells
+    PendingDataNodeMessageCount. (Anu Engineer via cnauroth)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e422e76f/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 4a10a00..ca89745 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -225,7 +225,7 @@ Each metrics record contains tags such as HAState and Hostname as additional inf
 | `PendingDeletionBlocks` | Current number of blocks pending deletion |
 | `ExcessBlocks` | Current number of excess blocks |
 | `PostponedMisreplicatedBlocks` | (HA-only) Current number of blocks postponed to replicate |
-| `PendingDataNodeMessageCourt` | (HA-only) Current number of pending block-related messages for later processing in the standby NameNode |
+| `PendingDataNodeMessageCount` | (HA-only) Current number of pending block-related messages for later processing in the standby NameNode |
 | `MillisSinceLastLoadedEdits` | (HA-only) Time in milliseconds since the last time standby NameNode load edit log. In active NameNode, set to 0 |
 | `BlockCapacity` | Current number of block capacity |
 | `StaleDataNodes` | Current number of DataNodes marked stale due to delayed heartbeat |
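
A one-character fix to the metrics table, but the corrected name is easy to verify against a
running NameNode through the JMX JSON servlet. A minimal sketch, assuming the default 2.x
NameNode web address (port 50070), an HA setup where the counter is reported, and the
servlet's usual pretty-printed one-attribute-per-line output:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    /** Sketch: dump the FSNamesystem bean and print the PendingDataNodeMessageCount line. */
    public class CheckFsNamesystemMetric {
      public static void main(String[] args) throws Exception {
        // Adjust host/port for your cluster; 50070 is the 2.x default NameNode HTTP port.
        String url = "http://localhost:50070/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem";
        HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
          String line;
          while ((line = in.readLine()) != null) {
            if (line.contains("PendingDataNodeMessageCount")) {
              System.out.println(line.trim());
            }
          }
        } finally {
          conn.disconnect();
        }
      }
    }

The documentation has to match what FSNamesystem actually emits, which is what this rename
brings back into line.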


[38/50] [abbrv] hadoop git commit: Moving MAPREDUCE-6361 to 2.7.1 CHANGES.txt

Posted by ji...@apache.org.
Moving MAPREDUCE-6361 to 2.7.1 CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ca1dfee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ca1dfee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ca1dfee

Branch: refs/heads/HDFS-7240
Commit: 8ca1dfeebb660741aa6e5b137cd1088815b614cf
Parents: 8860e35
Author: Junping Du <ju...@apache.org>
Authored: Tue May 19 11:48:06 2015 -0700
Committer: Junping Du <ju...@apache.org>
Committed: Tue May 19 11:48:51 2015 -0700

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ca1dfee/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 7e34297..10703c6 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -415,10 +415,6 @@ Release 2.8.0 - UNRELEASED
     MAPREDUCE-6360. TestMapreduceConfigFields is placed in wrong dir, 
     introducing compile error (Arshad Mohammad via vinayakumarb)
 
-    MAPREDUCE-6361. NPE issue in shuffle caused by concurrent issue between
-    copySucceeded() in one thread and copyFailed() in another thread on the
-    same host. (Junping Du via ozawa)
-
     MAPREDUCE-6366. mapreduce.terasort.final.sync configuration in TeraSort
     doesn't work. (Takuya Fukudome via ozawa)
 
@@ -465,6 +461,10 @@ Release 2.7.1 - UNRELEASED
     that they don't fail on history-server backed by DFSes with not so strong
     guarantees. (Craig Welch via vinodkv)
 
+    MAPREDUCE-6361. NPE issue in shuffle caused by concurrent issue between
+    copySucceeded() in one thread and copyFailed() in another thread on the
+    same host. (Junping Du via ozawa)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES


[09/50] [abbrv] hadoop git commit: HDFS-8397. Refactor the error handling code in DataStreamer. Contributed by Tsz Wo Nicholas Sze.

Posted by ji...@apache.org.
HDFS-8397. Refactor the error handling code in DataStreamer. Contributed by Tsz Wo Nicholas Sze.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8f378733
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8f378733
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8f378733

Branch: refs/heads/HDFS-7240
Commit: 8f378733423a5244461df79a92c00239514b8b93
Parents: f7e051c
Author: Jing Zhao <ji...@apache.org>
Authored: Fri May 15 16:14:54 2015 -0700
Committer: Jing Zhao <ji...@apache.org>
Committed: Fri May 15 16:14:54 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../org/apache/hadoop/hdfs/DataStreamer.java    | 543 +++++++++++--------
 2 files changed, 308 insertions(+), 238 deletions(-)
----------------------------------------------------------------------
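
Most of this diff replaces the loose error bookkeeping on DataStreamer (the volatile hasError
and errorIndex fields, the AtomicInteger restartingNodeIndex, and the restart deadline) with a
single synchronized ErrorState object, and splits the long recovery loop into
handleRestartingDatanode(), handleBadDatanode() and handleDatanodeReplacement(). The full
class is in the DataStreamer.java hunk below; as a condensed reading aid only (method and
field names mirror the patch, bodies abbreviated):

    // Condensed sketch, not the full class: the check-and-set logic the old code guarded
    // with ad-hoc synchronized methods now lives behind one object whose methods all
    // synchronize on the same lock.
    class ErrorState {
      private boolean error = false;
      private int badNodeIndex = -1;        // first datanode believed bad, or -1
      private int restartingNodeIndex = -1; // datanode that announced a restart, or -1

      synchronized boolean hasError() { return error; }
      synchronized void setError(boolean err) { this.error = err; }

      synchronized boolean isNodeMarked() {
        return badNodeIndex >= 0 || restartingNodeIndex >= 0;
      }

      /** Something failed but no datanode reported an error: blame the first node. */
      synchronized void markFirstNodeIfNotMarked() {
        if (!isNodeMarked()) {
          badNodeIndex = 0;   // pipeline recovery sorts out the real culprit
        }
      }
    }

    /** Tiny demo of the check-and-set idiom the streamer loop relies on. */
    class ErrorStateDemo {
      public static void main(String[] args) {
        ErrorState s = new ErrorState();
        s.setError(true);
        s.markFirstNodeIfNotMarked();   // no explicit report, so node 0 is marked
        System.out.println(s.hasError() + " " + s.isNodeMarked());  // true true
      }
    }

With the state centralized, the streamer's run() loop reduces to checks like
errorState.hasError() and the new shouldStop(), and processDatanodeError() can bail out early
via hasDatanodeError() instead of re-deriving the condition from three separate fields.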


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f378733/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6c0923c..35e81f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -554,6 +554,9 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-6888. Allow selectively audit logging ops (Chen He via vinayakumarb)
 
+    HDFS-8397. Refactor the error handling code in DataStreamer.
+    (Tsz Wo Nicholas Sze via jing9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f378733/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index b472b8c..cecd5a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -38,7 +38,6 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.commons.logging.Log;
@@ -46,6 +45,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.BlockWrite;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -208,6 +208,133 @@ class DataStreamer extends Daemon {
     }
   }
 
+  static class ErrorState {
+    private boolean error = false;
+    private int badNodeIndex = -1;
+    private int restartingNodeIndex = -1;
+    private long restartingNodeDeadline = 0;
+    private final long datanodeRestartTimeout;
+
+    ErrorState(long datanodeRestartTimeout) {
+      this.datanodeRestartTimeout = datanodeRestartTimeout;
+    }
+
+    synchronized void reset() {
+      error = false;
+      badNodeIndex = -1;
+      restartingNodeIndex = -1;
+      restartingNodeDeadline = 0;
+    }
+
+    synchronized boolean hasError() {
+      return error;
+    }
+
+    synchronized boolean hasDatanodeError() {
+      return error && isNodeMarked();
+    }
+
+    synchronized void setError(boolean err) {
+      this.error = err;
+    }
+
+    synchronized void setBadNodeIndex(int index) {
+      this.badNodeIndex = index;
+    }
+
+    synchronized int getBadNodeIndex() {
+      return badNodeIndex;
+    }
+
+    synchronized int getRestartingNodeIndex() {
+      return restartingNodeIndex;
+    }
+
+    synchronized void initRestartingNode(int i, String message) {
+      restartingNodeIndex = i;
+      restartingNodeDeadline =  Time.monotonicNow() + datanodeRestartTimeout;
+      // If the data streamer has already set the primary node
+      // bad, clear it. It is likely that the write failed due to
+      // the DN shutdown. Even if it was a real failure, the pipeline
+      // recovery will take care of it.
+      badNodeIndex = -1;
+      LOG.info(message);
+    }
+
+    synchronized boolean isRestartingNode() {
+      return restartingNodeIndex >= 0;
+    }
+
+    synchronized boolean isNodeMarked() {
+      return badNodeIndex >= 0 || isRestartingNode();
+    }
+
+    /**
+     * This method is used when no explicit error report was received, but
+     * something failed. The first node is a suspect or unsure about the cause
+     * so that it is marked as failed.
+     */
+    synchronized void markFirstNodeIfNotMarked() {
+      // There should be no existing error and no ongoing restart.
+      if (!isNodeMarked()) {
+        badNodeIndex = 0;
+      }
+    }
+
+    synchronized void adjustState4RestartingNode() {
+      // Just took care of a node error while waiting for a node restart
+      if (restartingNodeIndex >= 0) {
+        // If the error came from a node further away than the restarting
+        // node, the restart must have been complete.
+        if (badNodeIndex > restartingNodeIndex) {
+          restartingNodeIndex = -1;
+        } else if (badNodeIndex < restartingNodeIndex) {
+          // the node index has shifted.
+          restartingNodeIndex--;
+        } else {
+          throw new IllegalStateException("badNodeIndex = " + badNodeIndex
+              + " = restartingNodeIndex = " + restartingNodeIndex);
+        }
+      }
+
+      if (!isRestartingNode()) {
+        error = false;
+      }
+      badNodeIndex = -1;
+    }
+
+    synchronized void checkRestartingNodeDeadline(DatanodeInfo[] nodes) {
+      if (restartingNodeIndex >= 0) {
+        if (!error) {
+          throw new IllegalStateException("error=false while checking" +
+              " restarting node deadline");
+        }
+
+        // check badNodeIndex
+        if (badNodeIndex == restartingNodeIndex) {
+          // ignore, if came from the restarting node
+          badNodeIndex = -1;
+        }
+        // not within the deadline
+        if (Time.monotonicNow() >= restartingNodeDeadline) {
+          // expired. declare the restarting node dead
+          restartingNodeDeadline = 0;
+          final int i = restartingNodeIndex;
+          restartingNodeIndex = -1;
+          LOG.warn("Datanode " + i + " did not restart within "
+              + datanodeRestartTimeout + "ms: " + nodes[i]);
+          // Mark the restarting node as failed. If there is any other failed
+          // node during the last pipeline construction attempt, it will not be
+          // overwritten/dropped. In this case, the restarting node will get
+          // excluded in the following attempt, if it still does not come up.
+          if (badNodeIndex == -1) {
+            badNodeIndex = i;
+          }
+        }
+      }
+    }
+  }
+
   private volatile boolean streamerClosed = false;
   private ExtendedBlock block; // its length is number of bytes acked
   private Token<BlockTokenIdentifier> accessToken;
@@ -217,11 +344,8 @@ class DataStreamer extends Daemon {
   private volatile DatanodeInfo[] nodes = null; // list of targets for current block
   private volatile StorageType[] storageTypes = null;
   private volatile String[] storageIDs = null;
-  volatile boolean hasError = false;
-  volatile int errorIndex = -1;
-  // Restarting node index
-  AtomicInteger restartingNodeIndex = new AtomicInteger(-1);
-  private long restartDeadline = 0; // Deadline of DN restart
+  private final ErrorState errorState;
+
   private BlockConstructionStage stage;  // block construction stage
   private long bytesSent = 0; // number of bytes that've been sent
   private final boolean isLazyPersistFile;
@@ -287,11 +411,13 @@ class DataStreamer extends Daemon {
     this.cachingStrategy = cachingStrategy;
     this.byteArrayManager = byteArrayManage;
     this.isLazyPersistFile = isLazyPersist(stat);
-    this.dfsclientSlowLogThresholdMs =
-        dfsClient.getConf().getSlowIoWarningThresholdMs();
-    this.excludedNodes = initExcludedNodes();
     this.isAppend = isAppend;
     this.favoredNodes = favoredNodes;
+
+    final DfsClientConf conf = dfsClient.getConf();
+    this.dfsclientSlowLogThresholdMs = conf.getSlowIoWarningThresholdMs();
+    this.excludedNodes = initExcludedNodes(conf.getExcludedNodesCacheExpiry());
+    this.errorState = new ErrorState(conf.getDatanodeRestartTimeout());
   }
 
   /**
@@ -334,7 +460,6 @@ class DataStreamer extends Daemon {
   void setPipelineInConstruction(LocatedBlock lastBlock) throws IOException{
     // setup pipeline to append to the last block XXX retries??
     setPipeline(lastBlock);
-    errorIndex = -1;   // no errors yet.
     if (nodes.length < 1) {
       throw new IOException("Unable to retrieve blocks locations " +
           " for last block " + block +
@@ -375,6 +500,10 @@ class DataStreamer extends Daemon {
     stage = BlockConstructionStage.PIPELINE_SETUP_CREATE;
   }
 
+  private boolean shouldStop() {
+    return streamerClosed || errorState.hasError() || !dfsClient.clientRunning;
+  }
+
   /*
    * streamer thread is the only thread that opens streams to datanode,
    * and closes them. Any error recovery is also done by this thread.
@@ -385,7 +514,7 @@ class DataStreamer extends Daemon {
     TraceScope scope = NullScope.INSTANCE;
     while (!streamerClosed && dfsClient.clientRunning) {
       // if the Responder encountered an error, shutdown Responder
-      if (hasError && response != null) {
+      if (errorState.hasError() && response != null) {
         try {
           response.close();
           response.join();
@@ -398,17 +527,13 @@ class DataStreamer extends Daemon {
       DFSPacket one;
       try {
         // process datanode IO errors if any
-        boolean doSleep = false;
-        if (hasError && (errorIndex >= 0 || restartingNodeIndex.get() >= 0)) {
-          doSleep = processDatanodeError();
-        }
+        boolean doSleep = processDatanodeError();
 
         final int halfSocketTimeout = dfsClient.getConf().getSocketTimeout()/2; 
         synchronized (dataQueue) {
           // wait for a packet to be sent.
           long now = Time.monotonicNow();
-          while ((!streamerClosed && !hasError && dfsClient.clientRunning
-              && dataQueue.size() == 0 &&
+          while ((!shouldStop() && dataQueue.size() == 0 &&
               (stage != BlockConstructionStage.DATA_STREAMING ||
                   stage == BlockConstructionStage.DATA_STREAMING &&
                       now - lastPacket < halfSocketTimeout)) || doSleep ) {
@@ -424,13 +549,12 @@ class DataStreamer extends Daemon {
             doSleep = false;
             now = Time.monotonicNow();
           }
-          if (streamerClosed || hasError || !dfsClient.clientRunning) {
+          if (shouldStop()) {
             continue;
           }
           // get packet to be sent.
           if (dataQueue.isEmpty()) {
             one = createHeartbeatPacket();
-            assert one != null;
           } else {
             try {
               backOffIfNecessary();
@@ -460,7 +584,7 @@ class DataStreamer extends Daemon {
             LOG.debug("Append to block " + block);
           }
           setupPipelineForAppendOrRecovery();
-          if (true == streamerClosed) {
+          if (streamerClosed) {
             continue;
           }
           initDataStreaming();
@@ -478,8 +602,7 @@ class DataStreamer extends Daemon {
         if (one.isLastPacketInBlock()) {
           // wait for all data packets have been successfully acked
           synchronized (dataQueue) {
-            while (!streamerClosed && !hasError &&
-                ackQueue.size() != 0 && dfsClient.clientRunning) {
+            while (!shouldStop() && ackQueue.size() != 0) {
               try {
                 // wait for acks to arrive from datanodes
                 dataQueue.wait(1000);
@@ -488,7 +611,7 @@ class DataStreamer extends Daemon {
               }
             }
           }
-          if (streamerClosed || hasError || !dfsClient.clientRunning) {
+          if (shouldStop()) {
             continue;
           }
           stage = BlockConstructionStage.PIPELINE_CLOSE;
@@ -524,7 +647,7 @@ class DataStreamer extends Daemon {
           // effect. Pipeline recovery can handle only one node error at a
           // time. If the primary node fails again during the recovery, it
           // will be taken out then.
-          tryMarkPrimaryDatanodeFailed();
+          errorState.markFirstNodeIfNotMarked();
           throw e;
         } finally {
           writeScope.close();
@@ -537,7 +660,7 @@ class DataStreamer extends Daemon {
           bytesSent = tmpBytesSent;
         }
 
-        if (streamerClosed || hasError || !dfsClient.clientRunning) {
+        if (shouldStop()) {
           continue;
         }
 
@@ -545,12 +668,11 @@ class DataStreamer extends Daemon {
         if (one.isLastPacketInBlock()) {
           // wait for the close packet has been acked
           synchronized (dataQueue) {
-            while (!streamerClosed && !hasError &&
-                ackQueue.size() != 0 && dfsClient.clientRunning) {
+            while (!shouldStop() && ackQueue.size() != 0) {
               dataQueue.wait(1000);// wait for acks to arrive from datanodes
             }
           }
-          if (streamerClosed || hasError || !dfsClient.clientRunning) {
+          if (shouldStop()) {
             continue;
           }
 
@@ -564,7 +686,7 @@ class DataStreamer extends Daemon {
         }
       } catch (Throwable e) {
         // Log warning if there was a real error.
-        if (restartingNodeIndex.get() == -1) {
+        if (!errorState.isRestartingNode()) {
           // Since their messages are descriptive enough, do not always
           // log a verbose stack-trace WARN for quota exceptions.
           if (e instanceof QuotaExceededException) {
@@ -575,8 +697,8 @@ class DataStreamer extends Daemon {
         }
         lastException.set(e);
         assert !(e instanceof NullPointerException);
-        hasError = true;
-        if (errorIndex == -1 && restartingNodeIndex.get() == -1) {
+        errorState.setError(true);
+        if (!errorState.isNodeMarked()) {
           // Not a datanode issue
           streamerClosed = true;
         }
@@ -773,40 +895,6 @@ class DataStreamer extends Daemon {
     }
   }
 
-  // The following synchronized methods are used whenever
-  // errorIndex or restartingNodeIndex is set. This is because
-  // check & set needs to be atomic. Simply reading variables
-  // does not require a synchronization. When responder is
-  // not running (e.g. during pipeline recovery), there is no
-  // need to use these methods.
-
-  /** Set the error node index. Called by responder */
-  synchronized void setErrorIndex(int idx) {
-    errorIndex = idx;
-  }
-
-  /** Set the restarting node index. Called by responder */
-  synchronized void setRestartingNodeIndex(int idx) {
-    restartingNodeIndex.set(idx);
-    // If the data streamer has already set the primary node
-    // bad, clear it. It is likely that the write failed due to
-    // the DN shutdown. Even if it was a real failure, the pipeline
-    // recovery will take care of it.
-    errorIndex = -1;
-  }
-
-  /**
-   * This method is used when no explicit error report was received,
-   * but something failed. When the primary node is a suspect or
-   * unsure about the cause, the primary node is marked as failed.
-   */
-  synchronized void tryMarkPrimaryDatanodeFailed() {
-    // There should be no existing error and no ongoing restart.
-    if ((errorIndex == -1) && (restartingNodeIndex.get() == -1)) {
-      errorIndex = 0;
-    }
-  }
-
   /**
    * Examine whether it is worth waiting for a node to restart.
    * @param index the node index
@@ -883,20 +971,16 @@ class DataStreamer extends Daemon {
             // the local node or the only one in the pipeline.
             if (PipelineAck.isRestartOOBStatus(reply) &&
                 shouldWaitForRestart(i)) {
-              restartDeadline = dfsClient.getConf().getDatanodeRestartTimeout()
-                  + Time.monotonicNow();
-              setRestartingNodeIndex(i);
-              String message = "A datanode is restarting: " + targets[i];
-              LOG.info(message);
+              final String message = "Datanode " + i + " is restarting: "
+                  + targets[i];
+              errorState.initRestartingNode(i, message);
               throw new IOException(message);
             }
             // node error
             if (reply != SUCCESS) {
-              setErrorIndex(i); // first bad datanode
+              errorState.setBadNodeIndex(i); // mark bad datanode
               throw new IOException("Bad response " + reply +
-                  " for block " + block +
-                  " from datanode " +
-                  targets[i]);
+                  " for " + block + " from datanode " + targets[i]);
             }
           }
 
@@ -954,14 +1038,12 @@ class DataStreamer extends Daemon {
         } catch (Exception e) {
           if (!responderClosed) {
             lastException.set(e);
-            hasError = true;
-            // If no explicit error report was received, mark the primary
-            // node as failed.
-            tryMarkPrimaryDatanodeFailed();
+            errorState.setError(true);
+            errorState.markFirstNodeIfNotMarked();
             synchronized (dataQueue) {
               dataQueue.notifyAll();
             }
-            if (restartingNodeIndex.get() == -1) {
+            if (!errorState.isRestartingNode()) {
               LOG.warn("Exception for " + block, e);
             }
             responderClosed = true;
@@ -978,11 +1060,16 @@ class DataStreamer extends Daemon {
     }
   }
 
-  // If this stream has encountered any errors so far, shutdown
-  // threads and mark stream as closed. Returns true if we should
-  // sleep for a while after returning from this call.
-  //
+  /**
+   * If this stream has encountered any errors, shutdown threads
+   * and mark the stream as closed.
+   *
+   * @return true if it should sleep for a while after returning.
+   */
   private boolean processDatanodeError() throws IOException {
+    if (!errorState.hasDatanodeError()) {
+      return false;
+    }
     if (response != null) {
       LOG.info("Error Recovery for " + block +
           " waiting for responder to exit. ");
@@ -1064,7 +1151,7 @@ class DataStreamer extends Daemon {
               .append("The current failed datanode replacement policy is ")
               .append(dfsClient.dtpReplaceDatanodeOnFailure).append(", and ")
               .append("a client may configure this via '")
-              .append(HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.POLICY_KEY)
+              .append(BlockWrite.ReplaceDatanodeOnFailure.POLICY_KEY)
               .append("' in its configuration.")
               .toString());
     }
@@ -1190,157 +1277,141 @@ class DataStreamer extends Daemon {
     boolean success = false;
     long newGS = 0L;
     while (!success && !streamerClosed && dfsClient.clientRunning) {
-      // Sleep before reconnect if a dn is restarting.
-      // This process will be repeated until the deadline or the datanode
-      // starts back up.
-      if (restartingNodeIndex.get() >= 0) {
-        // 4 seconds or the configured deadline period, whichever is shorter.
-        // This is the retry interval and recovery will be retried in this
-        // interval until timeout or success.
-        long delay = Math.min(dfsClient.getConf().getDatanodeRestartTimeout(),
-            4000L);
-        try {
-          Thread.sleep(delay);
-        } catch (InterruptedException ie) {
-          lastException.set(new IOException("Interrupted while waiting for " +
-              "datanode to restart. " + nodes[restartingNodeIndex.get()]));
-          streamerClosed = true;
-          return false;
-        }
+      if (!handleRestartingDatanode()) {
+        return false;
       }
-      boolean isRecovery = hasError;
-      // remove bad datanode from list of datanodes.
-      // If errorIndex was not set (i.e. appends), then do not remove
-      // any datanodes
-      //
-      if (errorIndex >= 0) {
-        StringBuilder pipelineMsg = new StringBuilder();
-        for (int j = 0; j < nodes.length; j++) {
-          pipelineMsg.append(nodes[j]);
-          if (j < nodes.length - 1) {
-            pipelineMsg.append(", ");
-          }
-        }
-        if (nodes.length <= 1) {
-          lastException.set(new IOException("All datanodes " + pipelineMsg
-              + " are bad. Aborting..."));
-          streamerClosed = true;
-          return false;
-        }
-        LOG.warn("Error Recovery for block " + block +
-            " in pipeline " + pipelineMsg +
-            ": bad datanode " + nodes[errorIndex]);
-        failed.add(nodes[errorIndex]);
-
-        DatanodeInfo[] newnodes = new DatanodeInfo[nodes.length-1];
-        arraycopy(nodes, newnodes, errorIndex);
-
-        final StorageType[] newStorageTypes = new StorageType[newnodes.length];
-        arraycopy(storageTypes, newStorageTypes, errorIndex);
-
-        final String[] newStorageIDs = new String[newnodes.length];
-        arraycopy(storageIDs, newStorageIDs, errorIndex);
-
-        setPipeline(newnodes, newStorageTypes, newStorageIDs);
-
-        // Just took care of a node error while waiting for a node restart
-        if (restartingNodeIndex.get() >= 0) {
-          // If the error came from a node further away than the restarting
-          // node, the restart must have been complete.
-          if (errorIndex > restartingNodeIndex.get()) {
-            restartingNodeIndex.set(-1);
-          } else if (errorIndex < restartingNodeIndex.get()) {
-            // the node index has shifted.
-            restartingNodeIndex.decrementAndGet();
-          } else {
-            // this shouldn't happen...
-            assert false;
-          }
-        }
 
-        if (restartingNodeIndex.get() == -1) {
-          hasError = false;
-        }
-        lastException.clear();
-        errorIndex = -1;
+      final boolean isRecovery = errorState.hasError();
+      if (!handleBadDatanode()) {
+        return false;
       }
 
-      // Check if replace-datanode policy is satisfied.
-      if (dfsClient.dtpReplaceDatanodeOnFailure.satisfy(stat.getReplication(),
-          nodes, isAppend, isHflushed)) {
-        try {
-          addDatanode2ExistingPipeline();
-        } catch(IOException ioe) {
-          if (!dfsClient.dtpReplaceDatanodeOnFailure.isBestEffort()) {
-            throw ioe;
-          }
-          LOG.warn("Failed to replace datanode."
-              + " Continue with the remaining datanodes since "
-              + HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.BEST_EFFORT_KEY
-              + " is set to true.", ioe);
-        }
-      }
+      handleDatanodeReplacement();
 
       // get a new generation stamp and an access token
-      LocatedBlock lb = dfsClient.namenode.updateBlockForPipeline(block, dfsClient.clientName);
+      final LocatedBlock lb = updateBlockForPipeline();
       newGS = lb.getBlock().getGenerationStamp();
       accessToken = lb.getBlockToken();
 
       // set up the pipeline again with the remaining nodes
-      if (failPacket) { // for testing
-        success = createBlockOutputStream(nodes, storageTypes, newGS, isRecovery);
-        failPacket = false;
-        try {
-          // Give DNs time to send in bad reports. In real situations,
-          // good reports should follow bad ones, if client committed
-          // with those nodes.
-          Thread.sleep(2000);
-        } catch (InterruptedException ie) {}
-      } else {
-        success = createBlockOutputStream(nodes, storageTypes, newGS, isRecovery);
-      }
+      success = createBlockOutputStream(nodes, storageTypes, newGS, isRecovery);
 
-      if (restartingNodeIndex.get() >= 0) {
-        assert hasError == true;
-        // check errorIndex set above
-        if (errorIndex == restartingNodeIndex.get()) {
-          // ignore, if came from the restarting node
-          errorIndex = -1;
-        }
-        // still within the deadline
-        if (Time.monotonicNow() < restartDeadline) {
-          continue; // with in the deadline
-        }
-        // expired. declare the restarting node dead
-        restartDeadline = 0;
-        int expiredNodeIndex = restartingNodeIndex.get();
-        restartingNodeIndex.set(-1);
-        LOG.warn("Datanode did not restart in time: " +
-            nodes[expiredNodeIndex]);
-        // Mark the restarting node as failed. If there is any other failed
-        // node during the last pipeline construction attempt, it will not be
-        // overwritten/dropped. In this case, the restarting node will get
-        // excluded in the following attempt, if it still does not come up.
-        if (errorIndex == -1) {
-          errorIndex = expiredNodeIndex;
-        }
-        // From this point on, normal pipeline recovery applies.
-      }
+      failPacket4Testing();
+
+      errorState.checkRestartingNodeDeadline(nodes);
     } // while
 
     if (success) {
-      // update pipeline at the namenode
-      ExtendedBlock newBlock = new ExtendedBlock(
-          block.getBlockPoolId(), block.getBlockId(), block.getNumBytes(), newGS);
-      dfsClient.namenode.updatePipeline(dfsClient.clientName, block, newBlock,
-          nodes, storageIDs);
-      // update client side generation stamp
-      block = newBlock;
+      block = updatePipeline(newGS);
     }
     return false; // do not sleep, continue processing
   }
 
   /**
+   * Sleep if a node is restarting.
+   * This process is repeated until the deadline passes or the node starts back up.
+   * @return true if it should continue.
+   */
+  private boolean handleRestartingDatanode() {
+    if (errorState.isRestartingNode()) {
+      // 4 seconds or the configured deadline period, whichever is shorter.
+      // This is the retry interval and recovery will be retried in this
+      // interval until timeout or success.
+      final long delay = Math.min(errorState.datanodeRestartTimeout, 4000L);
+      try {
+        Thread.sleep(delay);
+      } catch (InterruptedException ie) {
+        lastException.set(new IOException(
+            "Interrupted while waiting for restarting "
+            + nodes[errorState.getRestartingNodeIndex()]));
+        streamerClosed = true;
+        return false;
+      }
+    }
+    return true;
+  }
+
+  /**
+   * Remove bad node from list of nodes if badNodeIndex was set.
+   * @return true if it should continue.
+   */
+  private boolean handleBadDatanode() {
+    final int badNodeIndex = errorState.getBadNodeIndex();
+    if (badNodeIndex >= 0) {
+      if (nodes.length <= 1) {
+        lastException.set(new IOException("All datanodes "
+            + Arrays.toString(nodes) + " are bad. Aborting..."));
+        streamerClosed = true;
+        return false;
+      }
+
+      LOG.warn("Error Recovery for " + block + " in pipeline "
+          + Arrays.toString(nodes) + ": datanode " + badNodeIndex
+          + "("+ nodes[badNodeIndex] + ") is bad.");
+      failed.add(nodes[badNodeIndex]);
+
+      DatanodeInfo[] newnodes = new DatanodeInfo[nodes.length-1];
+      arraycopy(nodes, newnodes, badNodeIndex);
+
+      final StorageType[] newStorageTypes = new StorageType[newnodes.length];
+      arraycopy(storageTypes, newStorageTypes, badNodeIndex);
+
+      final String[] newStorageIDs = new String[newnodes.length];
+      arraycopy(storageIDs, newStorageIDs, badNodeIndex);
+
+      setPipeline(newnodes, newStorageTypes, newStorageIDs);
+
+      errorState.adjustState4RestartingNode();
+      lastException.clear();
+    }
+    return true;
+  }
+
+  /** Add a datanode if replace-datanode policy is satisfied. */
+  private void handleDatanodeReplacement() throws IOException {
+    if (dfsClient.dtpReplaceDatanodeOnFailure.satisfy(stat.getReplication(),
+        nodes, isAppend, isHflushed)) {
+      try {
+        addDatanode2ExistingPipeline();
+      } catch(IOException ioe) {
+        if (!dfsClient.dtpReplaceDatanodeOnFailure.isBestEffort()) {
+          throw ioe;
+        }
+        LOG.warn("Failed to replace datanode."
+            + " Continue with the remaining datanodes since "
+            + BlockWrite.ReplaceDatanodeOnFailure.BEST_EFFORT_KEY
+            + " is set to true.", ioe);
+      }
+    }
+  }
+
+  private void failPacket4Testing() {
+    if (failPacket) { // for testing
+      failPacket = false;
+      try {
+        // Give DNs time to send in bad reports. In real situations,
+        // good reports should follow bad ones, if client committed
+        // with those nodes.
+        Thread.sleep(2000);
+      } catch (InterruptedException ie) {}
+    }
+  }
+
+  LocatedBlock updateBlockForPipeline() throws IOException {
+    return dfsClient.namenode.updateBlockForPipeline(
+        block, dfsClient.clientName);
+  }
+
+  /** update pipeline at the namenode */
+  ExtendedBlock updatePipeline(long newGS) throws IOException {
+    final ExtendedBlock newBlock = new ExtendedBlock(
+        block.getBlockPoolId(), block.getBlockId(), block.getNumBytes(), newGS);
+    dfsClient.namenode.updatePipeline(dfsClient.clientName, block, newBlock,
+        nodes, storageIDs);
+    return newBlock;
+  }
+
+  /**
    * Open a DataStreamer to a DataNode so that it can be written to.
    * This happens when a file is created and each time a new block is allocated.
    * Must get block ID and the IDs of the destinations from the namenode.
@@ -1354,9 +1425,8 @@ class DataStreamer extends Daemon {
     boolean success = false;
     ExtendedBlock oldBlock = block;
     do {
-      hasError = false;
+      errorState.reset();
       lastException.clear();
-      errorIndex = -1;
       success = false;
 
       DatanodeInfo[] excluded =
@@ -1382,8 +1452,9 @@ class DataStreamer extends Daemon {
         dfsClient.namenode.abandonBlock(block, stat.getFileId(), src,
             dfsClient.clientName);
         block = null;
-        LOG.info("Excluding datanode " + nodes[errorIndex]);
-        excludedNodes.put(nodes[errorIndex], nodes[errorIndex]);
+        final DatanodeInfo badNode = nodes[errorState.getBadNodeIndex()];
+        LOG.info("Excluding datanode " + badNode);
+        excludedNodes.put(badNode, badNode);
       }
     } while (!success && --count >= 0);
 
@@ -1464,7 +1535,7 @@ class DataStreamer extends Daemon {
         // from the local datanode. Thus it is safe to treat this as a
         // regular node error.
         if (PipelineAck.isRestartOOBStatus(pipelineStatus) &&
-            restartingNodeIndex.get() == -1) {
+            !errorState.isRestartingNode()) {
           checkRestart = true;
           throw new IOException("A datanode is restarting.");
         }
@@ -1475,10 +1546,9 @@ class DataStreamer extends Daemon {
         assert null == blockStream : "Previous blockStream unclosed";
         blockStream = out;
         result =  true; // success
-        restartingNodeIndex.set(-1);
-        hasError = false;
+        errorState.reset();
       } catch (IOException ie) {
-        if (restartingNodeIndex.get() == -1) {
+        if (!errorState.isRestartingNode()) {
           LOG.info("Exception in createBlockOutputStream", ie);
         }
         if (ie instanceof InvalidEncryptionKeyException && refetchEncryptionKey > 0) {
@@ -1498,24 +1568,21 @@ class DataStreamer extends Daemon {
           for (int i = 0; i < nodes.length; i++) {
             // NB: Unconditionally using the xfer addr w/o hostname
             if (firstBadLink.equals(nodes[i].getXferAddr())) {
-              errorIndex = i;
+              errorState.setBadNodeIndex(i);
               break;
             }
           }
         } else {
           assert checkRestart == false;
-          errorIndex = 0;
+          errorState.setBadNodeIndex(0);
         }
+
+        final int i = errorState.getBadNodeIndex();
         // Check whether there is a restart worth waiting for.
-        if (checkRestart && shouldWaitForRestart(errorIndex)) {
-          restartDeadline = dfsClient.getConf().getDatanodeRestartTimeout()
-              + Time.monotonicNow();
-          restartingNodeIndex.set(errorIndex);
-          errorIndex = -1;
-          LOG.info("Waiting for the datanode to be restarted: " +
-              nodes[restartingNodeIndex.get()]);
+        if (checkRestart && shouldWaitForRestart(i)) {
+          errorState.initRestartingNode(i, "Datanode " + i + " is restarting: " + nodes[i]);
         }
-        hasError = true;
+        errorState.setError(true);
         lastException.set(ie);
         result =  false;  // error
       } finally {
@@ -1699,10 +1766,10 @@ class DataStreamer extends Daemon {
     return new DFSPacket(buf, 0, 0, DFSPacket.HEART_BEAT_SEQNO, 0, false);
   }
 
-  private LoadingCache<DatanodeInfo, DatanodeInfo> initExcludedNodes() {
-    return CacheBuilder.newBuilder().expireAfterWrite(
-        dfsClient.getConf().getExcludedNodesCacheExpiry(),
-        TimeUnit.MILLISECONDS)
+  private static LoadingCache<DatanodeInfo, DatanodeInfo> initExcludedNodes(
+      long excludedNodesCacheExpiry) {
+    return CacheBuilder.newBuilder()
+        .expireAfterWrite(excludedNodesCacheExpiry, TimeUnit.MILLISECONDS)
         .removalListener(new RemovalListener<DatanodeInfo, DatanodeInfo>() {
           @Override
           public void onRemoval(

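For context on handleDatanodeReplacement() above: whether the client tries to add a replacement datanode after a pipeline failure, and whether a failed replacement is tolerated, is controlled by client-side configuration rather than by the streamer itself. A minimal sketch, assuming the standard dfs.client.block.write.replace-datanode-on-failure.* property names (the BEST_EFFORT_KEY referenced in the patch is the last of them); the values shown are illustrative only:

  import org.apache.hadoop.conf.Configuration;

  public class ReplaceDatanodePolicyExample {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // Feature switch for replace-datanode-on-failure (enabled by default).
      conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
      // DEFAULT only adds a replacement for larger pipelines/appends; ALWAYS and NEVER are the other options.
      conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
      // With best-effort on, a failed replacement is logged (as handleDatanodeReplacement does)
      // and the write continues on the remaining datanodes instead of throwing.
      conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
      // A DFSClient built from this Configuration picks these values up.
      System.out.println(conf.get("dfs.client.block.write.replace-datanode-on-failure.policy"));
    }
  }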

[07/50] [abbrv] hadoop git commit: YARN-3505 addendum: fix an issue in previous patch.

Posted by ji...@apache.org.
YARN-3505 addendum: fix an issue in previous patch.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/03a293ae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/03a293ae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/03a293ae

Branch: refs/heads/HDFS-7240
Commit: 03a293aed6de101b0cae1a294f506903addcaa75
Parents: 1ea9014
Author: Junping Du <ju...@apache.org>
Authored: Fri May 15 06:39:39 2015 -0700
Committer: Junping Du <ju...@apache.org>
Committed: Fri May 15 06:39:39 2015 -0700

----------------------------------------------------------------------
 .../containermanager/logaggregation/AppLogAggregatorImpl.java      | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/03a293ae/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
index dd2ab25..81be813 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
@@ -359,7 +359,7 @@ public class AppLogAggregatorImpl implements AppLogAggregator {
         finalReport.setApplicationId(appId);
         finalReport.setLogAggregationStatus(renameTemporaryLogFileFailed
             ? LogAggregationStatus.FAILED : LogAggregationStatus.SUCCEEDED);
-        this.context.getLogAggregationStatusForApps().add(report);
+        this.context.getLogAggregationStatusForApps().add(finalReport);
       }
     } finally {
       if (writer != null) {


[28/50] [abbrv] hadoop git commit: HADOOP-10971. Add -C flag to make `hadoop fs -ls` print filenames only. Contributed by Kengo Seki.

Posted by ji...@apache.org.
HADOOP-10971. Add -C flag to make `hadoop fs -ls` print filenames only. Contributed by Kengo Seki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b50dcdc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b50dcdc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b50dcdc

Branch: refs/heads/HDFS-7240
Commit: 3b50dcdce4ffe3d4e5892fca84909ff22be28739
Parents: 93972a3
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue May 19 17:25:27 2015 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue May 19 17:25:27 2015 +0900

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../java/org/apache/hadoop/fs/shell/Ls.java     | 36 +++++++++--
 .../src/site/markdown/FileSystemShell.md        |  3 +-
 .../java/org/apache/hadoop/fs/shell/TestLs.java | 67 ++++++++++++++++++++
 .../src/test/resources/testConf.xml             |  6 +-
 5 files changed, 106 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b50dcdc/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index cf09c5f..8ce77b6 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -488,6 +488,9 @@ Release 2.8.0 - UNRELEASED
 
     HADOOP-11949. Add user-provided plugins to test-patch (Sean Busbey via aw)
 
+    HADOOP-10971. Add -C flag to make `hadoop fs -ls` print filenames only.
+    (Kengo Seki via aajisaka)
+
   IMPROVEMENTS
 
     HADOOP-6842. "hadoop fs -text" does not give a useful text representation

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b50dcdc/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
index 8ef6d5e..d5c52ed 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
@@ -43,6 +43,7 @@ class Ls extends FsCommand {
     factory.addClass(Lsr.class, "-lsr");
   }
 
+  private static final String OPTION_PATHONLY = "C";
   private static final String OPTION_DIRECTORY = "d";
   private static final String OPTION_HUMAN = "h";
   private static final String OPTION_RECURSIVE = "R";
@@ -52,10 +53,10 @@ class Ls extends FsCommand {
   private static final String OPTION_SIZE = "S";
 
   public static final String NAME = "ls";
-  public static final String USAGE = "[-" + OPTION_DIRECTORY + "] [-"
-      + OPTION_HUMAN + "] " + "[-" + OPTION_RECURSIVE + "] [-" + OPTION_MTIME
-      + "] [-" + OPTION_SIZE + "] [-" + OPTION_REVERSE + "] " + "[-"
-      + OPTION_ATIME + "] [<path> ...]";
+  public static final String USAGE = "[-" + OPTION_PATHONLY + "] [-"
+      + OPTION_DIRECTORY + "] [-" + OPTION_HUMAN + "] [-" + OPTION_RECURSIVE
+      + "] [-" + OPTION_MTIME + "] [-" + OPTION_SIZE + "] [-" + OPTION_REVERSE
+      + "] [-" + OPTION_ATIME + "] [<path> ...]";
 
   public static final String DESCRIPTION =
       "List the contents that match the specified file pattern. If " +
@@ -67,6 +68,8 @@ class Ls extends FsCommand {
           "\tpermissions - userId groupId sizeOfDirectory(in bytes) modificationDate(yyyy-MM-dd HH:mm) directoryName\n\n" +
           "and file entries are of the form:\n" +
           "\tpermissions numberOfReplicas userId groupId sizeOfFile(in bytes) modificationDate(yyyy-MM-dd HH:mm) fileName\n\n" +
+          "  -" + OPTION_PATHONLY +
+          "  Display the paths of files and directories only.\n" +
           "  -" + OPTION_DIRECTORY +
           "  Directories are listed as plain files.\n" +
           "  -" + OPTION_HUMAN +
@@ -89,6 +92,7 @@ class Ls extends FsCommand {
 
   protected int maxRepl = 3, maxLen = 10, maxOwner = 0, maxGroup = 0;
   protected String lineFormat;
+  private boolean pathOnly;
   protected boolean dirRecurse;
   private boolean orderReverse;
   private boolean orderTime;
@@ -107,10 +111,11 @@ class Ls extends FsCommand {
   @Override
   protected void processOptions(LinkedList<String> args)
   throws IOException {
-    CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE,
+    CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, OPTION_PATHONLY,
         OPTION_DIRECTORY, OPTION_HUMAN, OPTION_RECURSIVE, OPTION_REVERSE,
         OPTION_MTIME, OPTION_SIZE, OPTION_ATIME);
     cf.parse(args);
+    pathOnly = cf.getOpt(OPTION_PATHONLY);
     dirRecurse = !cf.getOpt(OPTION_DIRECTORY);
     setRecursive(cf.getOpt(OPTION_RECURSIVE) && dirRecurse);
     humanReadable = cf.getOpt(OPTION_HUMAN);
@@ -124,6 +129,15 @@ class Ls extends FsCommand {
   }
 
   /**
+   * Should only the paths of files and directories be displayed?
+   * @return true if only paths are displayed, false if all fields are displayed
+   */
+  @InterfaceAudience.Private
+  boolean isPathOnly() {
+    return this.pathOnly;
+  }
+
+  /**
    * Should the contents of the directory be shown or just the directory?
    * @return true if directory contents, false if just directory
    */
@@ -191,15 +205,23 @@ class Ls extends FsCommand {
   protected void processPaths(PathData parent, PathData ... items)
   throws IOException {
     if (parent != null && !isRecursive() && items.length != 0) {
-      out.println("Found " + items.length + " items");
+      if (!pathOnly) {
+        out.println("Found " + items.length + " items");
+      }
       Arrays.sort(items, getOrderComparator());
     }
-    adjustColumnWidths(items);
+    if (!pathOnly) {
+      adjustColumnWidths(items);
+    }
     super.processPaths(parent, items);
   }
 
   @Override
   protected void processPath(PathData item) throws IOException {
+    if (pathOnly) {
+      out.println(item.toString());
+      return;
+    }
     FileStatus stat = item.stat;
     String line = String.format(lineFormat,
         (stat.isDirectory() ? "d" : "-"),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b50dcdc/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
index 2920e01..8ecc24a 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
@@ -382,10 +382,11 @@ Return usage output.
 ls
 ----
 
-Usage: `hadoop fs -ls [-d] [-h] [-R] [-t] [-S] [-r] [-u] <args> `
+Usage: `hadoop fs -ls [-C] [-d] [-h] [-R] [-t] [-S] [-r] [-u] <args> `
 
 Options:
 
+* -C: Display the paths of files and directories only.
 * -d: Directories are listed as plain files.
 * -h: Format file sizes in a human-readable fashion (eg 64.0m instead of 67108864).
 * -R: Recursively list subdirectories encountered.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b50dcdc/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestLs.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestLs.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestLs.java
index 66403db..4a9103f 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestLs.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestLs.java
@@ -74,6 +74,24 @@ public class TestLs {
     LinkedList<String> options = new LinkedList<String>();
     Ls ls = new Ls();
     ls.processOptions(options);
+    assertFalse(ls.isPathOnly());
+    assertTrue(ls.isDirRecurse());
+    assertFalse(ls.isHumanReadable());
+    assertFalse(ls.isRecursive());
+    assertFalse(ls.isOrderReverse());
+    assertFalse(ls.isOrderSize());
+    assertFalse(ls.isOrderTime());
+    assertFalse(ls.isUseAtime());
+  }
+
+  // check the -C option is recognised
+  @Test
+  public void processOptionsPathOnly() throws IOException {
+    LinkedList<String> options = new LinkedList<String>();
+    options.add("-C");
+    Ls ls = new Ls();
+    ls.processOptions(options);
+    assertTrue(ls.isPathOnly());
     assertTrue(ls.isDirRecurse());
     assertFalse(ls.isHumanReadable());
     assertFalse(ls.isRecursive());
@@ -90,6 +108,7 @@ public class TestLs {
     options.add("-d");
     Ls ls = new Ls();
     ls.processOptions(options);
+    assertFalse(ls.isPathOnly());
     assertFalse(ls.isDirRecurse());
     assertFalse(ls.isHumanReadable());
     assertFalse(ls.isRecursive());
@@ -106,6 +125,7 @@ public class TestLs {
     options.add("-h");
     Ls ls = new Ls();
     ls.processOptions(options);
+    assertFalse(ls.isPathOnly());
     assertTrue(ls.isDirRecurse());
     assertTrue(ls.isHumanReadable());
     assertFalse(ls.isRecursive());
@@ -122,6 +142,7 @@ public class TestLs {
     options.add("-R");
     Ls ls = new Ls();
     ls.processOptions(options);
+    assertFalse(ls.isPathOnly());
     assertTrue(ls.isDirRecurse());
     assertFalse(ls.isHumanReadable());
     assertTrue(ls.isRecursive());
@@ -138,6 +159,7 @@ public class TestLs {
     options.add("-r");
     Ls ls = new Ls();
     ls.processOptions(options);
+    assertFalse(ls.isPathOnly());
     assertTrue(ls.isDirRecurse());
     assertFalse(ls.isHumanReadable());
     assertFalse(ls.isRecursive());
@@ -154,6 +176,7 @@ public class TestLs {
     options.add("-S");
     Ls ls = new Ls();
     ls.processOptions(options);
+    assertFalse(ls.isPathOnly());
     assertTrue(ls.isDirRecurse());
     assertFalse(ls.isHumanReadable());
     assertFalse(ls.isRecursive());
@@ -170,6 +193,7 @@ public class TestLs {
     options.add("-t");
     Ls ls = new Ls();
     ls.processOptions(options);
+    assertFalse(ls.isPathOnly());
     assertTrue(ls.isDirRecurse());
     assertFalse(ls.isHumanReadable());
     assertFalse(ls.isRecursive());
@@ -187,6 +211,7 @@ public class TestLs {
     options.add("-S");
     Ls ls = new Ls();
     ls.processOptions(options);
+    assertFalse(ls.isPathOnly());
     assertTrue(ls.isDirRecurse());
     assertFalse(ls.isHumanReadable());
     assertFalse(ls.isRecursive());
@@ -205,6 +230,7 @@ public class TestLs {
     options.add("-r");
     Ls ls = new Ls();
     ls.processOptions(options);
+    assertFalse(ls.isPathOnly());
     assertTrue(ls.isDirRecurse());
     assertFalse(ls.isHumanReadable());
     assertFalse(ls.isRecursive());
@@ -221,6 +247,7 @@ public class TestLs {
     options.add("-u");
     Ls ls = new Ls();
     ls.processOptions(options);
+    assertFalse(ls.isPathOnly());
     assertTrue(ls.isDirRecurse());
     assertFalse(ls.isHumanReadable());
     assertFalse(ls.isRecursive());
@@ -234,6 +261,7 @@ public class TestLs {
   @Test
   public void processOptionsAll() throws IOException {
     LinkedList<String> options = new LinkedList<String>();
+    options.add("-C"); // show file path only
     options.add("-d"); // directory
     options.add("-h"); // human readable
     options.add("-R"); // recursive
@@ -243,6 +271,7 @@ public class TestLs {
     options.add("-u"); // show atime
     Ls ls = new Ls();
     ls.processOptions(options);
+    assertTrue(ls.isPathOnly());
     assertFalse(ls.isDirRecurse());
     assertTrue(ls.isHumanReadable());
     assertFalse(ls.isRecursive()); // -d overrules -R
@@ -981,6 +1010,44 @@ public class TestLs {
     verifyNoMoreInteractions(out);
   }
 
+  // check path only display (-C option)
+  @Test
+  public void processPathDirectoryPathOnly() throws IOException {
+    TestFile testfile01 = new TestFile("testDirectory", "testFile01");
+    TestFile testfile02 = new TestFile("testDirectory", "testFile02");
+    TestFile testfile03 = new TestFile("testDirectory", "testFile03");
+    TestFile testfile04 = new TestFile("testDirectory", "testFile04");
+    TestFile testfile05 = new TestFile("testDirectory", "testFile05");
+    TestFile testfile06 = new TestFile("testDirectory", "testFile06");
+
+    TestFile testDir = new TestFile("", "testDirectory");
+    testDir.setIsDir(true);
+    testDir.addContents(testfile01, testfile02, testfile03, testfile04,
+        testfile05, testfile06);
+
+    LinkedList<PathData> pathData = new LinkedList<PathData>();
+    pathData.add(testDir.getPathData());
+
+    PrintStream out = mock(PrintStream.class);
+
+    Ls ls = new Ls();
+    ls.out = out;
+
+    LinkedList<String> options = new LinkedList<String>();
+    options.add("-C");
+    ls.processOptions(options);
+
+    ls.processArguments(pathData);
+    InOrder inOrder = inOrder(out);
+    inOrder.verify(out).println(testfile01.getPath().toString());
+    inOrder.verify(out).println(testfile02.getPath().toString());
+    inOrder.verify(out).println(testfile03.getPath().toString());
+    inOrder.verify(out).println(testfile04.getPath().toString());
+    inOrder.verify(out).println(testfile05.getPath().toString());
+    inOrder.verify(out).println(testfile06.getPath().toString());
+    verifyNoMoreInteractions(out);
+  }
+
   // check the deprecated flag isn't set
   @Test
   public void isDeprecated() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b50dcdc/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
index 57cce14..62afff2 100644
--- a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
+++ b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
@@ -54,7 +54,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-ls \[-d\] \[-h\] \[-R\] \[-t\] \[-S\] \[-r\] \[-u\] \[&lt;path&gt; \.\.\.\] :( |\t)*</expected-output>
+          <expected-output>^-ls \[-C\] \[-d\] \[-h\] \[-R\] \[-t\] \[-S\] \[-r\] \[-u\] \[&lt;path&gt; \.\.\.\] :( |\t)*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
@@ -94,6 +94,10 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
+          <expected-output>^\s*-C\s+Display the paths of files and directories only\.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
           <expected-output>^\s*-d\s+Directories are listed as plain files\.( )*</expected-output>
         </comparator>
         <comparator>

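A usage note for the new -C option documented above: on the command line it is simply `hadoop fs -ls -C <path>`, printing one path per line with no "Found N items" header and no permission/owner columns. A minimal sketch of driving the same code path from Java, assuming ToolRunner and FsShell from hadoop-common and a placeholder path /user/alice:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FsShell;
  import org.apache.hadoop.util.ToolRunner;

  public class LsPathOnlyExample {
    public static void main(String[] args) throws Exception {
      // Equivalent to `hadoop fs -ls -C /user/alice`.
      int rc = ToolRunner.run(new Configuration(), new FsShell(),
          new String[] {"-ls", "-C", "/user/alice"});
      System.exit(rc);
    }
  }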

[44/50] [abbrv] hadoop git commit: YARN-3677. Fix findbugs warnings in yarn-server-resourcemanager. Contributed by Vinod Kumar Vavilapalli.

Posted by ji...@apache.org.
YARN-3677. Fix findbugs warnings in yarn-server-resourcemanager. Contributed by Vinod Kumar Vavilapalli.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7401e5b5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7401e5b5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7401e5b5

Branch: refs/heads/HDFS-7240
Commit: 7401e5b5e8060b6b027d714b5ceb641fcfe5b598
Parents: 563eb1a
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Wed May 20 08:53:50 2015 +0900
Committer: Tsuyoshi Ozawa <oz...@apache.org>
Committed: Wed May 20 08:59:26 2015 +0900

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                                   | 3 +++
 .../apache/hadoop/yarn/server/resourcemanager/RMAppManager.java   | 1 +
 .../server/resourcemanager/recovery/FileSystemRMStateStore.java   | 2 +-
 3 files changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7401e5b5/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 9ba9fd8..4bd4132 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -523,6 +523,9 @@ Release 2.7.1 - UNRELEASED
 
     YARN-3601. Fix UT TestRMFailover.testRMWebAppRedirect. (Weiwei Yang via xgong)
 
+    YARN-3677. Fix findbugs warnings in yarn-server-resourcemanager.
+    (Vinod Kumar Vavilapalli via ozawa)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7401e5b5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index 7990421..2d9431d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -230,6 +230,7 @@ public class RMAppManager implements EventHandler<RMAppManagerEvent>,
         success = true;
         break;
       default:
+        break;
     }
     
     if (success) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7401e5b5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
index 6920bb5..0d97d6f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
@@ -100,7 +100,7 @@ public class FileSystemRMStateStore extends RMStateStore {
   private Path dtSequenceNumberPath = null;
   private int fsNumRetries;
   private long fsRetryInterval;
-  private boolean isHDFS;
+  private volatile boolean isHDFS;
 
   @VisibleForTesting
   Path fsWorkingPath;


[06/50] [abbrv] hadoop git commit: MAPREDUCE-6273. HistoryFileManager should check whether summaryFile exists to avoid FileNotFoundException causing HistoryFileInfo into MOVE_FAILED state. Contributed by zhihai xu.

Posted by ji...@apache.org.
MAPREDUCE-6273. HistoryFileManager should check whether summaryFile exists
to avoid FileNotFoundException causing HistoryFileInfo into MOVE_FAILED
state. Contributed by zhihai xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ea90144
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ea90144
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ea90144

Branch: refs/heads/HDFS-7240
Commit: 1ea90144d58443a7431ff33ba49ee19278ebe42b
Parents: 3bef7c8
Author: Devaraj K <de...@apache.org>
Authored: Fri May 15 12:31:32 2015 +0530
Committer: Devaraj K <de...@apache.org>
Committed: Fri May 15 12:31:32 2015 +0530

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt            |  4 +++
 .../mapreduce/v2/hs/HistoryFileManager.java     | 11 +++---
 .../mapreduce/v2/hs/TestHistoryFileManager.java | 35 ++++++++++++++++++++
 3 files changed, 46 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea90144/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index bc1f427..7e34297 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -425,6 +425,10 @@ Release 2.8.0 - UNRELEASED
     MAPREDUCE-5708. Duplicate String.format in YarnOutputFiles.getSpillFileForWrite.
     (Konstantin Weitz via devaraj)
 
+    MAPREDUCE-6273. HistoryFileManager should check whether summaryFile exists to 
+    avoid FileNotFoundException causing HistoryFileInfo into MOVE_FAILED state.
+    (zhihai xu via devaraj)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea90144/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
index 69f814d..f0786da 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
@@ -303,8 +303,9 @@ public class HistoryFileManager extends AbstractService {
     private JobIndexInfo jobIndexInfo;
     private HistoryInfoState state;
 
-    private HistoryFileInfo(Path historyFile, Path confFile, Path summaryFile,
-        JobIndexInfo jobIndexInfo, boolean isInDone) {
+    @VisibleForTesting
+    protected HistoryFileInfo(Path historyFile, Path confFile,
+        Path summaryFile, JobIndexInfo jobIndexInfo, boolean isInDone) {
       this.historyFile = historyFile;
       this.confFile = confFile;
       this.summaryFile = summaryFile;
@@ -337,7 +338,8 @@ public class HistoryFileManager extends AbstractService {
              + " historyFile = " + historyFile;
     }
 
-    private synchronized void moveToDone() throws IOException {
+    @VisibleForTesting
+    synchronized void moveToDone() throws IOException {
       if (LOG.isDebugEnabled()) {
         LOG.debug("moveToDone: " + historyFile);
       }
@@ -368,7 +370,8 @@ public class HistoryFileManager extends AbstractService {
           paths.add(confFile);
         }
 
-        if (summaryFile == null) {
+        if (summaryFile == null || !intermediateDoneDirFc.util().exists(
+            summaryFile)) {
           LOG.info("No summary file for job: " + jobId);
         } else {
           String jobSummaryString = getJobSummary(intermediateDoneDirFc,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea90144/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java
index 1c5cc5c..7694de9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java
@@ -35,7 +35,12 @@ import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.mapreduce.JobID;
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo;
 import org.apache.hadoop.test.CoreTestDriver;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
@@ -221,4 +226,34 @@ public class TestHistoryFileManager {
 
   }
 
+  @Test
+  public void testHistoryFileInfoSummaryFileNotExist() throws Exception {
+    HistoryFileManagerTest hmTest = new HistoryFileManagerTest();
+    String job = "job_1410889000000_123456";
+    Path summaryFile = new Path(job + ".summary");
+    JobIndexInfo jobIndexInfo = new JobIndexInfo();
+    jobIndexInfo.setJobId(TypeConverter.toYarn(JobID.forName(job)));
+    Configuration conf = dfsCluster.getConfiguration(0);
+    conf.set(JHAdminConfig.MR_HISTORY_DONE_DIR,
+        "/" + UUID.randomUUID());
+    conf.set(JHAdminConfig.MR_HISTORY_INTERMEDIATE_DONE_DIR,
+        "/" + UUID.randomUUID());
+    hmTest.serviceInit(conf);
+    HistoryFileInfo info = hmTest.getHistoryFileInfo(null, null,
+        summaryFile, jobIndexInfo, false);
+    info.moveToDone();
+    Assert.assertFalse(info.didMoveFail());
+  }
+
+  static class HistoryFileManagerTest extends HistoryFileManager {
+    public HistoryFileManagerTest() {
+      super();
+    }
+    public HistoryFileInfo getHistoryFileInfo(Path historyFile,
+        Path confFile, Path summaryFile, JobIndexInfo jobIndexInfo,
+        boolean isInDone) {
+      return new HistoryFileInfo(historyFile, confFile, summaryFile,
+          jobIndexInfo, isInDone);
+    }
+  }
 }
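
The guard added in HistoryFileInfo#moveToDone amounts to an existence check through FileContext before the summary is read. A minimal, self-contained sketch of that check, assuming the default file system and a placeholder summary path:

  import org.apache.hadoop.fs.FileContext;
  import org.apache.hadoop.fs.Path;

  public class SummaryFileCheck {
    public static void main(String[] args) throws Exception {
      FileContext fc = FileContext.getFileContext();
      Path summaryFile = new Path("/tmp/job_1410889000000_123456.summary"); // placeholder
      // Same shape as the patched condition: a missing summary is treated as "no summary"
      // instead of surfacing later as a FileNotFoundException that fails the move.
      if (summaryFile == null || !fc.util().exists(summaryFile)) {
        System.out.println("No summary file for this job; continuing without it");
      } else {
        System.out.println("Summary file present: " + summaryFile);
      }
    }
  }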


[22/50] [abbrv] hadoop git commit: HDFS-8345. Storage policy APIs must be exposed via the FileSystem interface. (Arpit Agarwal)

Posted by ji...@apache.org.
HDFS-8345. Storage policy APIs must be exposed via the FileSystem interface. (Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2190bf1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2190bf1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2190bf1

Branch: refs/heads/HDFS-7240
Commit: a2190bf15d25e01fb4b220ba6401ce2f787a5c61
Parents: 060c84e
Author: Arpit Agarwal <ar...@apache.org>
Authored: Mon May 18 11:36:29 2015 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Mon May 18 11:36:29 2015 -0700

----------------------------------------------------------------------
 .../apache/hadoop/fs/AbstractFileSystem.java    | 27 ++++++++
 .../apache/hadoop/fs/BlockStoragePolicySpi.java | 72 ++++++++++++++++++++
 .../java/org/apache/hadoop/fs/FileContext.java  | 33 +++++++++
 .../java/org/apache/hadoop/fs/FileSystem.java   | 28 ++++++++
 .../org/apache/hadoop/fs/FilterFileSystem.java  | 13 ++++
 .../java/org/apache/hadoop/fs/FilterFs.java     | 13 ++++
 .../org/apache/hadoop/fs/viewfs/ChRootedFs.java | 14 ++++
 .../org/apache/hadoop/fs/viewfs/ViewFs.java     | 14 ++++
 .../org/apache/hadoop/fs/TestHarFileSystem.java |  7 ++
 .../hdfs/protocol/BlockStoragePolicy.java       |  8 ++-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +
 .../main/java/org/apache/hadoop/fs/Hdfs.java    | 13 ++++
 .../hadoop/hdfs/DistributedFileSystem.java      | 27 +++++---
 .../apache/hadoop/hdfs/server/mover/Mover.java  |  5 +-
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   |  5 +-
 .../hadoop/hdfs/TestBlockStoragePolicy.java     | 64 ++++++++++-------
 16 files changed, 308 insertions(+), 38 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2190bf1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
index 7af5fa7..cb3fb86 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
@@ -23,6 +23,7 @@ import java.lang.reflect.Constructor;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.List;
@@ -1221,6 +1222,32 @@ public abstract class AbstractFileSystem {
         + " doesn't support deleteSnapshot");
   }
 
+  /**
+   * Set the storage policy for a given file or directory.
+   *
+   * @param path file or directory path.
+   * @param policyName the name of the target storage policy. The list
+   *                   of supported Storage policies can be retrieved
+   *                   via {@link #getAllStoragePolicies}.
+   */
+  public void setStoragePolicy(final Path path, final String policyName)
+      throws IOException {
+    throw new UnsupportedOperationException(getClass().getSimpleName()
+        + " doesn't support setStoragePolicy");
+  }
+
+  /**
+   * Retrieve all the storage policies supported by this file system.
+   *
+   * @return all storage policies supported by this filesystem.
+   * @throws IOException
+   */
+  public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
+      throws IOException {
+    throw new UnsupportedOperationException(getClass().getSimpleName()
+        + " doesn't support getAllStoragePolicies");
+  }
+
   @Override //Object
   public int hashCode() {
     return myUri.hashCode();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2190bf1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockStoragePolicySpi.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockStoragePolicySpi.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockStoragePolicySpi.java
new file mode 100644
index 0000000..1d6502e
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockStoragePolicySpi.java
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * A storage policy specifies the placement of block replicas on specific
+ * storage types.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public interface BlockStoragePolicySpi {
+
+  /**
+   * Return the name of the storage policy. Policies are uniquely
+   * identified by name.
+   *
+   * @return the name of the storage policy.
+   */
+  String getName();
+
+  /**
+   * Return the preferred storage types associated with this policy. These
+   * storage types are used sequentially for successive block replicas.
+   *
+   * @return preferred storage types used for placing block replicas.
+   */
+  StorageType[] getStorageTypes();
+
+  /**
+   * Get the fallback storage types for creating new block replicas. Fallback
+   * storage types are used if the preferred storage types are not available.
+   *
+   * @return fallback storage types for new block replicas.
+   */
+  StorageType[] getCreationFallbacks();
+
+  /**
+   * Get the fallback storage types for replicating existing block replicas.
+   * Fallback storage types are used if the preferred storage types are not
+   * available.
+   *
+   * @return fallback storage types for replicating existing block replicas.
+   */
+  StorageType[] getReplicationFallbacks();
+
+  /**
+   * Returns true if the policy is inherit-only and cannot be changed for
+   * an existing file.
+   *
+   * @return true if the policy is inherit-only.
+   */
+  boolean isCopyOnCreateFile();
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2190bf1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index 16cb591..122ddf6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -24,6 +24,7 @@ import java.io.OutputStream;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.EnumSet;
 import java.util.HashSet;
 import java.util.IdentityHashMap;
@@ -2665,4 +2666,36 @@ public class FileContext {
       }
     }.resolve(this, absF);
   }
+
+  /**
+   * Set the storage policy for a given file or directory.
+   *
+   * @param path file or directory path.
+   * @param policyName the name of the target storage policy. The list
+   *                   of supported Storage policies can be retrieved
+   *                   via {@link #getAllStoragePolicies}.
+   */
+  public void setStoragePolicy(final Path path, final String policyName)
+      throws IOException {
+    final Path absF = fixRelativePart(path);
+    new FSLinkResolver<Void>() {
+      @Override
+      public Void next(final AbstractFileSystem fs, final Path p)
+          throws IOException {
+        fs.setStoragePolicy(path, policyName);
+        return null;
+      }
+    }.resolve(this, absF);
+  }
+
+  /**
+   * Retrieve all the storage policies supported by this file system.
+   *
+   * @return all storage policies supported by this filesystem.
+   * @throws IOException
+   */
+  public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
+      throws IOException {
+    return defaultFS.getAllStoragePolicies();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2190bf1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index b508727..33d7c88 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -26,6 +26,7 @@ import java.net.URISyntaxException;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -2609,6 +2610,33 @@ public abstract class FileSystem extends Configured implements Closeable {
         + " doesn't support removeXAttr");
   }
 
+  /**
+   * Set the storage policy for a given file or directory.
+   *
+   * @param src file or directory path.
+   * @param policyName the name of the target storage policy. The list
+   *                   of supported Storage policies can be retrieved
+   *                   via {@link #getAllStoragePolicies}.
+   * @throws IOException
+   */
+  public void setStoragePolicy(final Path src, final String policyName)
+      throws IOException {
+    throw new UnsupportedOperationException(getClass().getSimpleName()
+        + " doesn't support setStoragePolicy");
+  }
+
+  /**
+   * Retrieve all the storage policies supported by this file system.
+   *
+   * @return all storage policies supported by this filesystem.
+   * @throws IOException
+   */
+  public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
+      throws IOException {
+    throw new UnsupportedOperationException(getClass().getSimpleName()
+        + " doesn't support getAllStoragePolicies");
+  }
+
   // making it volatile to be able to do a double checked locking
   private volatile static boolean FILE_SYSTEMS_LOADED = false;
 

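With the FileSystem methods above, client code can enumerate and assign storage policies without touching HDFS-specific classes. A minimal usage sketch, assuming fs.defaultFS points at a file system that actually implements these methods (e.g. HDFS), and using a placeholder path plus the HDFS "COLD" policy name:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.BlockStoragePolicySpi;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class StoragePolicyExample {
    public static void main(String[] args) throws Exception {
      FileSystem fs = FileSystem.get(new Configuration());
      // List every policy the underlying file system advertises.
      for (BlockStoragePolicySpi policy : fs.getAllStoragePolicies()) {
        System.out.println(policy.getName());
      }
      // Pin a directory to a policy by name; files created under it inherit the policy.
      fs.setStoragePolicy(new Path("/archive"), "COLD"); // placeholder path and policy name
    }
  }
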
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2190bf1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
index ec056a4..11f3b23 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.fs;
 import java.io.*;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.util.Collection;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
@@ -619,4 +620,16 @@ public class FilterFileSystem extends FileSystem {
   public void removeXAttr(Path path, String name) throws IOException {
     fs.removeXAttr(path, name);
   }
+
+  @Override
+  public void setStoragePolicy(Path src, String policyName)
+      throws IOException {
+    fs.setStoragePolicy(src, policyName);
+  }
+
+  @Override
+  public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
+      throws IOException {
+    return fs.getAllStoragePolicies();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2190bf1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
index 2ba6318..539b26e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
@@ -20,6 +20,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.util.Collection;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
@@ -397,4 +398,16 @@ public abstract class FilterFs extends AbstractFileSystem {
       throws IOException {
     myFs.deleteSnapshot(path, snapshotName);
   }
+
+  @Override
+  public void setStoragePolicy(Path path, String policyName)
+      throws IOException {
+    myFs.setStoragePolicy(path, policyName);
+  }
+
+  @Override
+  public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
+      throws IOException {
+    return myFs.getAllStoragePolicies();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2190bf1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
index a05a700..4e5a0d5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
@@ -20,6 +20,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.util.Collection;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
@@ -28,6 +29,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.AbstractFileSystem;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -378,6 +380,18 @@ class ChRootedFs extends AbstractFileSystem {
   }
 
   @Override
+  public void setStoragePolicy(Path path, String policyName)
+    throws IOException {
+    myFs.setStoragePolicy(fullPath(path), policyName);
+  }
+
+  @Override
+  public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
+      throws IOException {
+    return myFs.getAllStoragePolicies();
+  }
+
+  @Override
   public void setVerifyChecksum(final boolean verifyChecksum)
       throws IOException, UnresolvedLinkException {
     myFs.setVerifyChecksum(verifyChecksum);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2190bf1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
index a23aa86..bec292c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
@@ -740,6 +740,14 @@ public class ViewFs extends AbstractFileSystem {
     res.targetFileSystem.deleteSnapshot(res.remainingPath, snapshotName);
   }
 
+  @Override
+  public void setStoragePolicy(final Path path, final String policyName)
+      throws IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res =
+        fsState.resolve(getUriPath(path), true);
+    res.targetFileSystem.setStoragePolicy(res.remainingPath, policyName);
+  }
+
   /*
    * An instance of this class represents an internal dir of the viewFs 
    * ie internal dir of the mount table.
@@ -1070,5 +1078,11 @@ public class ViewFs extends AbstractFileSystem {
       checkPathIsSlash(path);
       throw readOnlyMountTable("deleteSnapshot", path);
     }
+
+    @Override
+    public void setStoragePolicy(Path path, String policyName)
+        throws IOException {
+      throw readOnlyMountTable("setStoragePolicy", path);
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2190bf1/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
index 374bb2e..46f24fc 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
@@ -34,6 +34,7 @@ import org.junit.Test;
 import java.io.IOException;
 import java.lang.reflect.Method;
 import java.lang.reflect.Modifier;
+import java.util.Collection;
 import java.util.EnumSet;
 import java.util.Iterator;
 import java.util.List;
@@ -205,6 +206,12 @@ public class TestHarFileSystem {
     public AclStatus getAclStatus(Path path) throws IOException;
 
     public void access(Path path, FsAction mode) throws IOException;
+
+    public void setStoragePolicy(Path src, String policyName)
+        throws IOException;
+
+    public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
+        throws IOException;
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2190bf1/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java
index 9ecf6e8..2624960 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java
@@ -25,6 +25,7 @@ import java.util.List;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.StorageType;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -34,7 +35,7 @@ import org.slf4j.LoggerFactory;
  * for the replicas of a block.
  */
 @InterfaceAudience.Private
-public class BlockStoragePolicy {
+public class BlockStoragePolicy implements BlockStoragePolicySpi {
   public static final Logger LOG = LoggerFactory.getLogger(BlockStoragePolicy
       .class);
 
@@ -239,18 +240,22 @@ public class BlockStoragePolicy {
     return id;
   }
 
+  @Override
   public String getName() {
     return name;
   }
 
+  @Override
   public StorageType[] getStorageTypes() {
     return this.storageTypes;
   }
 
+  @Override
   public StorageType[] getCreationFallbacks() {
     return this.creationFallbacks;
   }
 
+  @Override
   public StorageType[] getReplicationFallbacks() {
     return this.replicationFallbacks;
   }
@@ -265,6 +270,7 @@ public class BlockStoragePolicy {
     return null;
   }
 
+  @Override
   public boolean isCopyOnCreateFile() {
     return copyOnCreateFile;
   }
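
With BlockStoragePolicy now implementing BlockStoragePolicySpi, callers can
inspect policies through the neutral interface instead of the HDFS-specific
class. A minimal sketch, assuming fs.defaultFS points at a file system that
supports getAllStoragePolicies(); the class name is illustrative.

import java.io.IOException;
import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.FileSystem;

public class PrintStoragePolicies {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    // Uses only the interface methods overridden above: getName(),
    // getStorageTypes() and getCreationFallbacks().
    for (BlockStoragePolicySpi policy : fs.getAllStoragePolicies()) {
      System.out.println(policy.getName()
          + " storage=" + Arrays.toString(policy.getStorageTypes())
          + " creationFallbacks="
          + Arrays.toString(policy.getCreationFallbacks()));
    }
  }
}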

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2190bf1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8d0c5b6..4270a9c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -567,6 +567,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8157. Writes to RAM DISK reserve locked memory for block files.
     (Arpit Agarwal)
 
+    HDFS-8345. Storage policy APIs must be exposed via the FileSystem
+    interface. (Arpit Agarwal)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2190bf1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
index b776849..3f78b31 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
@@ -22,6 +22,8 @@ import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
@@ -465,6 +467,17 @@ public class Hdfs extends AbstractFileSystem {
     dfs.checkAccess(getUriPath(path), mode);
   }
 
+  @Override
+  public void setStoragePolicy(Path path, String policyName) throws IOException {
+    dfs.setStoragePolicy(getUriPath(path), policyName);
+  }
+
+  @Override
+  public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
+      throws IOException {
+    return Arrays.asList(dfs.getStoragePolicies());
+  }
+
   /**
    * Renew an existing delegation token.
    * 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2190bf1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 4ca6d57..902636c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -23,6 +23,8 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
@@ -32,6 +34,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.BlockStorageLocation;
+import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.CacheFlag;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
@@ -532,6 +535,7 @@ public class DistributedFileSystem extends FileSystem {
    * @param src The source path referring to either a directory or a file.
    * @param policyName The name of the storage policy.
    */
+  @Override
   public void setStoragePolicy(final Path src, final String policyName)
       throws IOException {
     statistics.incrementWriteOps(1);
@@ -546,19 +550,24 @@ public class DistributedFileSystem extends FileSystem {
       @Override
       public Void next(final FileSystem fs, final Path p)
           throws IOException {
-        if (fs instanceof DistributedFileSystem) {
-          ((DistributedFileSystem) fs).setStoragePolicy(p, policyName);
-          return null;
-        } else {
-          throw new UnsupportedOperationException(
-              "Cannot perform setStoragePolicy on a non-DistributedFileSystem: "
-                  + src + " -> " + p);
-        }
+        fs.setStoragePolicy(p, policyName);
+        return null;
       }
     }.resolve(this, absF);
   }
 
-  /** Get all the existing storage policies */
+  @Override
+  public Collection<BlockStoragePolicy> getAllStoragePolicies()
+      throws IOException {
+    return Arrays.asList(dfs.getStoragePolicies());
+  }
+
+  /**
+   * Deprecated. Prefer {@link FileSystem#getAllStoragePolicies()}.
+   * @return all block storage policies currently known to this file system
+   * @throws IOException if the policies cannot be retrieved
+   */
+  @Deprecated
   public BlockStoragePolicy[] getStoragePolicies() throws IOException {
     statistics.incrementReadOps(1);
     return dfs.getStoragePolicies();
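
The hunk above deprecates the array-returning getStoragePolicies() in favour of
the FileSystem-level getAllStoragePolicies(). A hedged migration sketch,
assuming fs.defaultFS points at an HDFS cluster (otherwise the downcast in the
old-style call fails); the class name is illustrative.

import java.io.IOException;
import java.util.Collection;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;

public class StoragePolicyMigration {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();

    // Old style: requires a downcast, so it only works against HDFS.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    BlockStoragePolicy[] oldStyle = dfs.getStoragePolicies();   // now deprecated
    System.out.println("old API returned " + oldStyle.length + " policies");

    // New style: code against FileSystem and keep working with any
    // implementation that supports storage policies.
    FileSystem fs = FileSystem.get(conf);
    Collection<? extends BlockStoragePolicySpi> newStyle =
        fs.getAllStoragePolicies();
    System.out.println("new API returned " + newStyle.size() + " policies");
  }
}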

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2190bf1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
index 0710f3e..8715ce4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
@@ -27,6 +27,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -152,8 +153,8 @@ public class Mover {
   }
 
   private void initStoragePolicies() throws IOException {
-    BlockStoragePolicy[] policies = dispatcher.getDistributedFileSystem()
-        .getStoragePolicies();
+    Collection<BlockStoragePolicy> policies =
+        dispatcher.getDistributedFileSystem().getAllStoragePolicies();
     for (BlockStoragePolicy policy : policies) {
       this.blockStoragePolicies[policy.getId()] = policy;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2190bf1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
index e3bdffa..98c8a6b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.util.Tool;
 
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.LinkedList;
 import java.util.List;
 
@@ -97,7 +98,7 @@ public class StoragePolicyAdmin extends Configured implements Tool {
     public int run(Configuration conf, List<String> args) throws IOException {
       final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
       try {
-        BlockStoragePolicy[] policies = dfs.getStoragePolicies();
+        Collection<BlockStoragePolicy> policies = dfs.getAllStoragePolicies();
         System.out.println("Block Storage Policies:");
         for (BlockStoragePolicy policy : policies) {
           if (policy != null) {
@@ -155,7 +156,7 @@ public class StoragePolicyAdmin extends Configured implements Tool {
           System.out.println("The storage policy of " + path + " is unspecified");
           return 0;
         }
-        BlockStoragePolicy[] policies = dfs.getStoragePolicies();
+        Collection<BlockStoragePolicy> policies = dfs.getAllStoragePolicies();
         for (BlockStoragePolicy p : policies) {
           if (p.getId() == storagePolicyId) {
             System.out.println("The storage policy of " + path + ":\n" + p);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2190bf1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
index 5e3b55f..ea69f97 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
@@ -25,7 +25,9 @@ import java.io.IOException;
 import java.util.*;
 
 import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -1150,30 +1152,6 @@ public class TestBlockStoragePolicy {
     Assert.assertEquals(3, targets.length);
   }
 
-  /**
-   * Test getting all the storage policies from the namenode
-   */
-  @Test
-  public void testGetAllStoragePolicies() throws Exception {
-    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(0).build();
-    cluster.waitActive();
-    final DistributedFileSystem fs = cluster.getFileSystem();
-    try {
-      BlockStoragePolicy[] policies = fs.getStoragePolicies();
-      Assert.assertEquals(6, policies.length);
-      Assert.assertEquals(POLICY_SUITE.getPolicy(COLD).toString(),
-          policies[0].toString());
-      Assert.assertEquals(POLICY_SUITE.getPolicy(WARM).toString(),
-          policies[1].toString());
-      Assert.assertEquals(POLICY_SUITE.getPolicy(HOT).toString(),
-          policies[2].toString());
-    } finally {
-      IOUtils.cleanup(null, fs);
-      cluster.shutdown();
-    }
-  }
-
   @Test
   public void testGetFileStoragePolicyAfterRestartNN() throws Exception {
     //HDFS8219
@@ -1217,4 +1195,42 @@ public class TestBlockStoragePolicy {
       cluster.shutdown();
     }
   }
+
+  /**
+   * Verify that {@link FileSystem#getAllStoragePolicies} returns all
+   * known storage policies for DFS.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testGetAllStoragePoliciesFromFs() throws IOException {
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(REPLICATION)
+        .storageTypes(
+            new StorageType[] {StorageType.DISK, StorageType.ARCHIVE})
+        .build();
+    try {
+      cluster.waitActive();
+
+      // Get policies via {@link FileSystem#getAllStoragePolicies}
+      Set<String> policyNamesSet1 = new HashSet<>();
+      for (BlockStoragePolicySpi policy :
+          cluster.getFileSystem().getAllStoragePolicies()) {
+        policyNamesSet1.add(policy.getName());
+      }
+
+      // Get policies from the default BlockStoragePolicySuite.
+      BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
+      Set<String> policyNamesSet2 = new HashSet<>();
+      for (BlockStoragePolicy policy : suite.getAllPolicies()) {
+        policyNamesSet2.add(policy.getName());
+      }
+
+      // Ensure that we got the same set of policies in both cases.
+      Assert.assertTrue(Sets.difference(policyNamesSet1, policyNamesSet2).isEmpty());
+      Assert.assertTrue(Sets.difference(policyNamesSet2, policyNamesSet1).isEmpty());
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }
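
Taken together, the pieces of this commit let applications manage storage
policies through the generic FileSystem API. A short hypothetical usage sketch;
the directory is illustrative and "COLD" is one of the built-in policy names
exercised by the tests above. File systems without storage-policy support are
expected to reject the call.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SetStoragePolicyExample {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    Path archiveDir = new Path("/data/archive");   // hypothetical directory
    fs.mkdirs(archiveDir);
    // Pin the directory (and files later created under it) to the COLD policy.
    fs.setStoragePolicy(archiveDir, "COLD");
  }
}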


[40/50] [abbrv] hadoop git commit: YARN-2821. Fixed a problem that DistributedShell AM may hang if restarted. Contributed by Varun Vasudev

Posted by ji...@apache.org.
YARN-2821. Fixed a problem that DistributedShell AM may hang if restarted. Contributed by Varun Vasudev


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/74389665
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/74389665
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/74389665

Branch: refs/heads/HDFS-7240
Commit: 7438966586f1896ab3e8b067d47a4af28a894106
Parents: e422e76
Author: Jian He <ji...@apache.org>
Authored: Tue May 19 14:20:31 2015 -0700
Committer: Jian He <ji...@apache.org>
Committed: Tue May 19 14:20:31 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |   3 +
 .../pom.xml                                     |   5 +
 .../distributedshell/ApplicationMaster.java     |  54 +++++++-
 .../distributedshell/TestDSAppMaster.java       | 130 +++++++++++++++++++
 4 files changed, 187 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/74389665/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 34cd051..5a6fb38 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -420,6 +420,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3302. TestDockerContainerExecutor should run automatically if it can
     detect docker in the usual place (Ravindra Kumar Naik via raviprak)
 
+    YARN-2821. Fixed a problem that DistributedShell AM may hang if restarted.
+    (Varun Vasudev via jianhe)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74389665/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
index 5b4440f..09a56ea 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
@@ -116,6 +116,11 @@
       <type>test-jar</type>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-all</artifactId>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
 
   <build>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74389665/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
index b62c24c..b28c0c9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
@@ -30,10 +30,12 @@ import java.net.URISyntaxException;
 import java.nio.ByteBuffer;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.Vector;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
@@ -277,6 +279,10 @@ public class ApplicationMaster {
   private final String linux_bash_command = "bash";
   private final String windows_command = "cmd /c";
 
+  @VisibleForTesting
+  protected final Set<ContainerId> launchedContainers =
+      Collections.newSetFromMap(new ConcurrentHashMap<ContainerId, Boolean>());
+
   /**
    * @param args Command line args
    */
@@ -601,8 +607,12 @@ public class ApplicationMaster {
         response.getContainersFromPreviousAttempts();
     LOG.info(appAttemptID + " received " + previousAMRunningContainers.size()
       + " previous attempts' running containers on AM registration.");
+    for(Container container: previousAMRunningContainers) {
+      launchedContainers.add(container.getId());
+    }
     numAllocatedContainers.addAndGet(previousAMRunningContainers.size());
 
+
     int numTotalContainersToRequest =
         numTotalContainers - previousAMRunningContainers.size();
     // Setup ask for containers from RM
@@ -715,8 +725,9 @@ public class ApplicationMaster {
 
     return success;
   }
-  
-  private class RMCallbackHandler implements AMRMClientAsync.CallbackHandler {
+
+  @VisibleForTesting
+  class RMCallbackHandler implements AMRMClientAsync.CallbackHandler {
     @SuppressWarnings("unchecked")
     @Override
     public void onContainersCompleted(List<ContainerStatus> completedContainers) {
@@ -731,6 +742,14 @@ public class ApplicationMaster {
 
         // non complete containers should not be here
         assert (containerStatus.getState() == ContainerState.COMPLETE);
+        // ignore containers we know nothing about - probably from a previous
+        // attempt
+        if (!launchedContainers.contains(containerStatus.getContainerId())) {
+          LOG.info("Ignoring completed status of "
+              + containerStatus.getContainerId()
+              + "; unknown container (probably launched by a previous attempt)");
+          continue;
+        }
 
         // increment counters for completed/failed containers
         int exitStatus = containerStatus.getExitStatus();
@@ -796,14 +815,13 @@ public class ApplicationMaster {
         // + ", containerToken"
         // +allocatedContainer.getContainerToken().getIdentifier().toString());
 
-        LaunchContainerRunnable runnableLaunchContainer =
-            new LaunchContainerRunnable(allocatedContainer, containerListener);
-        Thread launchThread = new Thread(runnableLaunchContainer);
+        Thread launchThread = createLaunchContainerThread(allocatedContainer);
 
         // launch and start the container on a separate thread to keep
         // the main thread unblocked
         // as all containers may not be allocated at one go.
         launchThreads.add(launchThread);
+        launchedContainers.add(allocatedContainer.getId());
         launchThread.start();
       }
     }
@@ -1150,4 +1168,30 @@ public class ApplicationMaster {
           + appAttemptId.toString(), e);
     }
   }
+
+  RMCallbackHandler getRMCallbackHandler() {
+    return new RMCallbackHandler();
+  }
+
+  @VisibleForTesting
+  void setAmRMClient(AMRMClientAsync client) {
+    this.amRMClient = client;
+  }
+
+  @VisibleForTesting
+  int getNumCompletedContainers() {
+    return numCompletedContainers.get();
+  }
+
+  @VisibleForTesting
+  boolean getDone() {
+    return done;
+  }
+
+  @VisibleForTesting
+  Thread createLaunchContainerThread(Container allocatedContainer) {
+    LaunchContainerRunnable runnableLaunchContainer =
+        new LaunchContainerRunnable(allocatedContainer, containerListener);
+    return new Thread(runnableLaunchContainer);
+  }
 }
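
The fix boils down to one piece of bookkeeping: remember every container this
attempt launched (or adopted from a previous attempt at registration time) and
drop completion events for anything else. A stripped-down, non-YARN sketch of
that pattern; the class and method names are illustrative only.

import java.util.Collections;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class LaunchedContainerTracker {

  // Same construction as the AM's launchedContainers set: a concurrent Set
  // backed by a ConcurrentHashMap, safe to use from callback threads.
  private final Set<String> launched =
      Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());

  /** Record a container launched by this attempt or recovered on registration. */
  public void register(String containerId) {
    launched.add(containerId);
  }

  /** True only for completion events that belong to a known container. */
  public boolean shouldProcessCompletion(String containerId) {
    return launched.contains(containerId);
  }
}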

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74389665/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSAppMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSAppMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSAppMaster.java
index 11e840a..0fed14d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSAppMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSAppMaster.java
@@ -20,13 +20,143 @@ package org.apache.hadoop.yarn.applications.distributedshell;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.client.api.AMRMClient;
+import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;
 import org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.junit.Assert;
 import org.junit.Test;
+import org.mockito.Matchers;
+import org.mockito.Mockito;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Tests that verify container allocations and releases
+ * are handled correctly.
+ */
 public class TestDSAppMaster {
 
+  static class TestAppMaster extends ApplicationMaster {
+    private int threadsLaunched = 0;
+
+    @Override
+    protected Thread createLaunchContainerThread(Container allocatedContainer) {
+      threadsLaunched++;
+      launchedContainers.add(allocatedContainer.getId());
+      return new Thread();
+    }
+
+    void setNumTotalContainers(int numTotalContainers) {
+      this.numTotalContainers = numTotalContainers;
+    }
+
+    int getAllocatedContainers() {
+      return this.numAllocatedContainers.get();
+    }
+
+    @Override
+    void startTimelineClient(final Configuration conf) throws YarnException,
+        IOException, InterruptedException {
+      timelineClient = null;
+    }
+  }
+
+  @SuppressWarnings("unchecked")
+  @Test
+  public void testDSAppMasterAllocateHandler() throws Exception {
+
+    TestAppMaster master = new TestAppMaster();
+    int targetContainers = 2;
+    AMRMClientAsync mockClient = Mockito.mock(AMRMClientAsync.class);
+    master.setAmRMClient(mockClient);
+    master.setNumTotalContainers(targetContainers);
+    Mockito.doNothing().when(mockClient)
+        .addContainerRequest(Matchers.any(AMRMClient.ContainerRequest.class));
+
+    ApplicationMaster.RMCallbackHandler handler = master.getRMCallbackHandler();
+
+    List<Container> containers = new ArrayList<>(1);
+    ContainerId id1 = BuilderUtils.newContainerId(1, 1, 1, 1);
+    containers.add(generateContainer(id1));
+
+    master.numRequestedContainers.set(targetContainers);
+
+    // first allocate a single container, everything should be fine
+    handler.onContainersAllocated(containers);
+    Assert.assertEquals("Wrong container allocation count", 1,
+        master.getAllocatedContainers());
+    Mockito.verifyZeroInteractions(mockClient);
+    Assert.assertEquals("Incorrect number of threads launched", 1,
+        master.threadsLaunched);
+
+    // now send 3 extra containers
+    containers.clear();
+    ContainerId id2 = BuilderUtils.newContainerId(1, 1, 1, 2);
+    containers.add(generateContainer(id2));
+    ContainerId id3 = BuilderUtils.newContainerId(1, 1, 1, 3);
+    containers.add(generateContainer(id3));
+    ContainerId id4 = BuilderUtils.newContainerId(1, 1, 1, 4);
+    containers.add(generateContainer(id4));
+    handler.onContainersAllocated(containers);
+    Assert.assertEquals("Wrong final container allocation count", 4,
+        master.getAllocatedContainers());
+
+    Assert.assertEquals("Incorrect number of threads launched", 4,
+        master.threadsLaunched);
+
+    // make sure we handle completion events correctly
+    List<ContainerStatus> status = new ArrayList<>();
+    status.add(generateContainerStatus(id1, ContainerExitStatus.SUCCESS));
+    status.add(generateContainerStatus(id2, ContainerExitStatus.SUCCESS));
+    status.add(generateContainerStatus(id3, ContainerExitStatus.ABORTED));
+    status.add(generateContainerStatus(id4, ContainerExitStatus.ABORTED));
+    handler.onContainersCompleted(status);
+
+    Assert.assertEquals("Unexpected number of completed containers",
+        targetContainers, master.getNumCompletedContainers());
+    Assert.assertTrue("Master didn't finish containers as expected",
+        master.getDone());
+
+    // test for events from containers we know nothing about
+    // these events should be ignored
+    status = new ArrayList<>();
+    ContainerId id5 = BuilderUtils.newContainerId(1, 1, 1, 5);
+    status.add(generateContainerStatus(id5, ContainerExitStatus.ABORTED));
+    Assert.assertEquals("Unexpected number of completed containers",
+        targetContainers, master.getNumCompletedContainers());
+    Assert.assertTrue("Master didn't finish containers as expected",
+        master.getDone());
+    status.add(generateContainerStatus(id5, ContainerExitStatus.SUCCESS));
+    Assert.assertEquals("Unexpected number of completed containers",
+        targetContainers, master.getNumCompletedContainers());
+    Assert.assertTrue("Master didn't finish containers as expected",
+        master.getDone());
+  }
+
+  private Container generateContainer(ContainerId cid) {
+    return Container.newInstance(cid, NodeId.newInstance("host", 5000),
+      "host:80", Resource.newInstance(1024, 1), Priority.newInstance(0), null);
+  }
+
+  private ContainerStatus
+      generateContainerStatus(ContainerId id, int exitStatus) {
+    return ContainerStatus.newInstance(id, ContainerState.COMPLETE, "",
+      exitStatus);
+  }
+
   @Test
   public void testTimelineClientInDSAppMaster() throws Exception {
     ApplicationMaster appMaster = new ApplicationMaster();


[47/50] [abbrv] hadoop git commit: HADOOP-11698. Remove DistCpV1 and Logalyzer. Contributed by Brahma Reddy Battula.

Posted by ji...@apache.org.
HADOOP-11698. Remove DistCpV1 and Logalyzer. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4aa730ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4aa730ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4aa730ce

Branch: refs/heads/HDFS-7240
Commit: 4aa730ce85d4c69c0ea8227c6c5276d96454c426
Parents: ce53c8e
Author: Akira Ajisaka <aa...@apache.org>
Authored: Wed May 20 19:25:45 2015 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Wed May 20 19:25:45 2015 +0900

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |    3 +
 .../java/org/apache/hadoop/tools/DistCpV1.java  | 1674 ------------------
 .../java/org/apache/hadoop/tools/Logalyzer.java |  329 ----
 .../org/apache/hadoop/tools/TestCopyFiles.java  | 1077 -----------
 .../org/apache/hadoop/tools/TestLogalyzer.java  |  133 --
 5 files changed, 3 insertions(+), 3213 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aa730ce/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index b0b8fb7..1624ce2 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -26,6 +26,9 @@ Trunk (Unreleased)
     HADOOP-11627. Remove io.native.lib.available.
     (Brahma Reddy Battula via aajisaka)
 
+    HADOOP-11698. Remove DistCpV1 and Logalyzer.
+    (Brahma Reddy Battula via aajisaka)
+
   NEW FEATURES
 
     HADOOP-6590. Add a username check for hadoop sub-commands (John Smith via

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aa730ce/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCpV1.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCpV1.java b/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCpV1.java
deleted file mode 100644
index 39ac5c3..0000000
--- a/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCpV1.java
+++ /dev/null
@@ -1,1674 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.tools;
-
-import java.io.BufferedReader;
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.nio.charset.Charset;
-import java.util.ArrayList;
-import java.util.EnumSet;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Random;
-import java.util.Stack;
-import java.util.StringTokenizer;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.fs.FileChecksum;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.Trash;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.NullWritable;
-import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.io.SequenceFile.Reader;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableComparable;
-import org.apache.hadoop.io.SequenceFile.Writer;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.mapred.FileOutputFormat;
-import org.apache.hadoop.mapred.FileSplit;
-import org.apache.hadoop.mapred.InputFormat;
-import org.apache.hadoop.mapred.InputSplit;
-import org.apache.hadoop.mapred.InvalidInputException;
-import org.apache.hadoop.mapred.JobClient;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.Mapper;
-import org.apache.hadoop.mapred.OutputCollector;
-import org.apache.hadoop.mapred.RecordReader;
-import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.mapred.SequenceFileRecordReader;
-import org.apache.hadoop.mapreduce.JobSubmissionFiles;
-import org.apache.hadoop.mapreduce.security.TokenCache;
-import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
-
-/**
- * A Map-reduce program to recursively copy directories between
- * different file-systems.
- */
-@Deprecated
-public class DistCpV1 implements Tool {
-  public static final Log LOG = LogFactory.getLog(DistCpV1.class);
-
-  private static final String NAME = "distcp";
-
-  private static final String usage = NAME
-    + " [OPTIONS] <srcurl>* <desturl>" +
-    "\n\nOPTIONS:" +
-    "\n-p[rbugpt]             Preserve status" +
-    "\n                       r: replication number" +
-    "\n                       b: block size" +
-    "\n                       u: user" + 
-    "\n                       g: group" +
-    "\n                       p: permission" +
-    "\n                       t: modification and access times" +
-    "\n                       -p alone is equivalent to -prbugpt" +
-    "\n-i                     Ignore failures" +
-    "\n-basedir <basedir>     Use <basedir> as the base directory when copying files from <srcurl>" +
-    "\n-log <logdir>          Write logs to <logdir>" +
-    "\n-m <num_maps>          Maximum number of simultaneous copies" +
-    "\n-overwrite             Overwrite destination" +
-    "\n-update                Overwrite if src size different from dst size" +
-    "\n-skipcrccheck          Do not use CRC check to determine if src is " +
-    "\n                       different from dest. Relevant only if -update" +
-    "\n                       is specified" +
-    "\n-f <urilist_uri>       Use list at <urilist_uri> as src list" +
-    "\n-filelimit <n>         Limit the total number of files to be <= n" +
-    "\n-sizelimit <n>         Limit the total size to be <= n bytes" +
-    "\n-delete                Delete the files existing in the dst but not in src" +
-    "\n-dryrun                Display count of files and total size of files" +
-    "\n                        in src and then exit. Copy is not done at all." +
-    "\n                        desturl should not be speicified with out -update." +
-    "\n-mapredSslConf <f>     Filename of SSL configuration for mapper task" +
-    
-    "\n\nNOTE 1: if -overwrite or -update are set, each source URI is " +
-    "\n      interpreted as an isomorphic update to an existing directory." +
-    "\nFor example:" +
-    "\nhadoop " + NAME + " -p -update \"hdfs://A:8020/user/foo/bar\" " +
-    "\"hdfs://B:8020/user/foo/baz\"\n" +
-    "\n     would update all descendants of 'baz' also in 'bar'; it would " +
-    "\n     *not* update /user/foo/baz/bar" + 
-
-    "\n\nNOTE 2: The parameter <n> in -filelimit and -sizelimit can be " +
-    "\n     specified with symbolic representation.  For examples," +
-    "\n       1230k = 1230 * 1024 = 1259520" +
-    "\n       891g = 891 * 1024^3 = 956703965184" +
-    
-    "\n";
-  
-  private static final long BYTES_PER_MAP =  256 * 1024 * 1024;
-  private static final int MAX_MAPS_PER_NODE = 20;
-  private static final int SYNC_FILE_MAX = 10;
-  private static final int DEFAULT_FILE_RETRIES = 3;
-
-  static enum Counter { COPY, SKIP, FAIL, BYTESCOPIED, BYTESEXPECTED }
-  static enum Options {
-    DELETE("-delete", NAME + ".delete"),
-    FILE_LIMIT("-filelimit", NAME + ".limit.file"),
-    SIZE_LIMIT("-sizelimit", NAME + ".limit.size"),
-    IGNORE_READ_FAILURES("-i", NAME + ".ignore.read.failures"),
-    PRESERVE_STATUS("-p", NAME + ".preserve.status"),
-    OVERWRITE("-overwrite", NAME + ".overwrite.always"),
-    UPDATE("-update", NAME + ".overwrite.ifnewer"),
-    SKIPCRC("-skipcrccheck", NAME + ".skip.crc.check");
-
-    final String cmd, propertyname;
-
-    private Options(String cmd, String propertyname) {
-      this.cmd = cmd;
-      this.propertyname = propertyname;
-    }
-    
-    private long parseLong(String[] args, int offset) {
-      if (offset ==  args.length) {
-        throw new IllegalArgumentException("<n> not specified in " + cmd);
-      }
-      long n = StringUtils.TraditionalBinaryPrefix.string2long(args[offset]);
-      if (n <= 0) {
-        throw new IllegalArgumentException("n = " + n + " <= 0 in " + cmd);
-      }
-      return n;
-    }
-  }
-  static enum FileAttribute {
-    BLOCK_SIZE, REPLICATION, USER, GROUP, PERMISSION, TIMES;
-
-    final char symbol;
-
-    private FileAttribute() {
-      symbol = StringUtils.toLowerCase(toString()).charAt(0);
-    }
-    
-    static EnumSet<FileAttribute> parse(String s) {
-      if (s == null || s.length() == 0) {
-        return EnumSet.allOf(FileAttribute.class);
-      }
-
-      EnumSet<FileAttribute> set = EnumSet.noneOf(FileAttribute.class);
-      FileAttribute[] attributes = values();
-      for(char c : s.toCharArray()) {
-        int i = 0;
-        for(; i < attributes.length && c != attributes[i].symbol; i++);
-        if (i < attributes.length) {
-          if (!set.contains(attributes[i])) {
-            set.add(attributes[i]);
-          } else {
-            throw new IllegalArgumentException("There are more than one '"
-                + attributes[i].symbol + "' in " + s); 
-          }
-        } else {
-          throw new IllegalArgumentException("'" + c + "' in " + s
-              + " is undefined.");
-        }
-      }
-      return set;
-    }
-  }
-
-  static final String TMP_DIR_LABEL = NAME + ".tmp.dir";
-  static final String DST_DIR_LABEL = NAME + ".dest.path";
-  static final String JOB_DIR_LABEL = NAME + ".job.dir";
-  static final String MAX_MAPS_LABEL = NAME + ".max.map.tasks";
-  static final String SRC_LIST_LABEL = NAME + ".src.list";
-  static final String SRC_COUNT_LABEL = NAME + ".src.count";
-  static final String TOTAL_SIZE_LABEL = NAME + ".total.size";
-  static final String DST_DIR_LIST_LABEL = NAME + ".dst.dir.list";
-  static final String BYTES_PER_MAP_LABEL = NAME + ".bytes.per.map";
-  static final String PRESERVE_STATUS_LABEL
-      = Options.PRESERVE_STATUS.propertyname + ".value";
-  static final String FILE_RETRIES_LABEL = NAME + ".file.retries";
-
-  private JobConf conf;
-
-  public void setConf(Configuration conf) {
-    if (conf instanceof JobConf) {
-      this.conf = (JobConf) conf;
-    } else {
-      this.conf = new JobConf(conf);
-    }
-  }
-
-  public Configuration getConf() {
-    return conf;
-  }
-
-  public DistCpV1(Configuration conf) {
-    setConf(conf);
-  }
-
-  /**
-   * An input/output pair of filenames.
-   */
-  static class FilePair implements Writable {
-    FileStatus input = new FileStatus();
-    String output;
-    FilePair() { }
-    FilePair(FileStatus input, String output) {
-      this.input = input;
-      this.output = output;
-    }
-    public void readFields(DataInput in) throws IOException {
-      input.readFields(in);
-      output = Text.readString(in);
-    }
-    public void write(DataOutput out) throws IOException {
-      input.write(out);
-      Text.writeString(out, output);
-    }
-    public String toString() {
-      return input + " : " + output;
-    }
-  }
-
-  /**
-   * InputFormat of a distcp job responsible for generating splits of the src
-   * file list.
-   */
-  static class CopyInputFormat implements InputFormat<Text, Text> {
-
-    /**
-     * Produce splits such that each is no greater than the quotient of the
-     * total size and the number of splits requested.
-     * @param job The handle to the JobConf object
-     * @param numSplits Number of splits requested
-     */
-    public InputSplit[] getSplits(JobConf job, int numSplits)
-        throws IOException {
-      int cnfiles = job.getInt(SRC_COUNT_LABEL, -1);
-      long cbsize = job.getLong(TOTAL_SIZE_LABEL, -1);
-      String srcfilelist = job.get(SRC_LIST_LABEL, "");
-      if (cnfiles < 0 || cbsize < 0 || "".equals(srcfilelist)) {
-        throw new RuntimeException("Invalid metadata: #files(" + cnfiles +
-                                   ") total_size(" + cbsize + ") listuri(" +
-                                   srcfilelist + ")");
-      }
-      Path src = new Path(srcfilelist);
-      FileSystem fs = src.getFileSystem(job);
-      FileStatus srcst = fs.getFileStatus(src);
-
-      ArrayList<FileSplit> splits = new ArrayList<FileSplit>(numSplits);
-      LongWritable key = new LongWritable();
-      FilePair value = new FilePair();
-      final long targetsize = cbsize / numSplits;
-      long pos = 0L;
-      long last = 0L;
-      long acc = 0L;
-      long cbrem = srcst.getLen();
-      try (SequenceFile.Reader sl =
-          new SequenceFile.Reader(job, Reader.file(src))) {
-        for (; sl.next(key, value); last = sl.getPosition()) {
-          // if adding this split would put this split past the target size,
-          // cut the last split and put this next file in the next split.
-          if (acc + key.get() > targetsize && acc != 0) {
-            long splitsize = last - pos;
-            splits.add(new FileSplit(src, pos, splitsize, (String[])null));
-            cbrem -= splitsize;
-            pos = last;
-            acc = 0L;
-          }
-          acc += key.get();
-        }
-      }
-      if (cbrem != 0) {
-        splits.add(new FileSplit(src, pos, cbrem, (String[])null));
-      }
-
-      return splits.toArray(new FileSplit[splits.size()]);
-    }
-
-    /**
-     * Returns a reader for this split of the src file list.
-     */
-    public RecordReader<Text, Text> getRecordReader(InputSplit split,
-        JobConf job, Reporter reporter) throws IOException {
-      return new SequenceFileRecordReader<Text, Text>(job, (FileSplit)split);
-    }
-  }
-
-  /**
-   * FSCopyFilesMapper: The mapper for copying files between FileSystems.
-   */
-  static class CopyFilesMapper
-      implements Mapper<LongWritable, FilePair, WritableComparable<?>, Text> {
-    // config
-    private int sizeBuf = 128 * 1024;
-    private FileSystem destFileSys = null;
-    private boolean ignoreReadFailures;
-    private boolean preserve_status;
-    private EnumSet<FileAttribute> preseved;
-    private boolean overwrite;
-    private boolean update;
-    private Path destPath = null;
-    private byte[] buffer = null;
-    private JobConf job;
-    private boolean skipCRCCheck = false;
-    
-    // stats
-    private int failcount = 0;
-    private int skipcount = 0;
-    private int copycount = 0;
-
-    private String getCountString() {
-      return "Copied: " + copycount + " Skipped: " + skipcount
-          + " Failed: " + failcount;
-    }
-    private void updateStatus(Reporter reporter) {
-      reporter.setStatus(getCountString());
-    }
-
-    /**
-     * Return true if dst should be replaced by src and the update flag is set.
-     * Right now, this merely checks that the src and dst len are not equal. 
-     * This should be improved on once modification times, CRCs, etc. can
-     * be meaningful in this context.
-     * @throws IOException 
-     */
-    private boolean needsUpdate(FileStatus srcstatus,
-        FileSystem dstfs, Path dstpath) throws IOException {
-      return update && !sameFile(srcstatus.getPath().getFileSystem(job),
-          srcstatus, dstfs, dstpath, skipCRCCheck);
-    }
-    
-    private FSDataOutputStream create(Path f, Reporter reporter,
-        FileStatus srcstat) throws IOException {
-      if (destFileSys.exists(f)) {
-        destFileSys.delete(f, false);
-      }
-      if (!preserve_status) {
-        return destFileSys.create(f, true, sizeBuf, reporter);
-      }
-
-      FsPermission permission = preseved.contains(FileAttribute.PERMISSION)?
-          srcstat.getPermission(): null;
-      short replication = preseved.contains(FileAttribute.REPLICATION)?
-          srcstat.getReplication(): destFileSys.getDefaultReplication(f);
-      long blockSize = preseved.contains(FileAttribute.BLOCK_SIZE)?
-          srcstat.getBlockSize(): destFileSys.getDefaultBlockSize(f);
-      return destFileSys.create(f, permission, true, sizeBuf, replication,
-          blockSize, reporter);
-    }
-
-    /**
-     * Validates copy by checking the sizes of files first and then
-     * checksums, if the filesystems support checksums.
-     * @param srcstat src path and metadata
-     * @param absdst dst path
-     * @return true if src & destination files are same
-     */
-    private boolean validateCopy(FileStatus srcstat, Path absdst)
-            throws IOException {
-      if (destFileSys.exists(absdst)) {
-        if (sameFile(srcstat.getPath().getFileSystem(job), srcstat,
-            destFileSys, absdst, skipCRCCheck)) {
-          return true;
-        }
-      }
-      return false;
-    }
-    
-    /**
-     * Increment number of files copied and bytes copied and then report status
-     */
-    void updateCopyStatus(FileStatus srcstat, Reporter reporter) {
-      copycount++;
-      reporter.incrCounter(Counter.BYTESCOPIED, srcstat.getLen());
-      reporter.incrCounter(Counter.COPY, 1);
-      updateStatus(reporter);
-    }
-    
-    /**
-     * Skip copying this file if already exists at the destination.
-     * Updates counters and copy status if skipping this file.
-     * @return true    if copy of this file can be skipped
-     */
-    private boolean skipCopyFile(FileStatus srcstat, Path absdst,
-                            OutputCollector<WritableComparable<?>, Text> outc,
-                            Reporter reporter) throws IOException {
-      if (destFileSys.exists(absdst) && !overwrite
-          && !needsUpdate(srcstat, destFileSys, absdst)) {
-        outc.collect(null, new Text("SKIP: " + srcstat.getPath()));
-        ++skipcount;
-        reporter.incrCounter(Counter.SKIP, 1);
-        updateStatus(reporter);
-        return true;
-      }
-      return false;
-    }
-    
-    /**
-     * Copies single file to the path specified by tmpfile.
-     * @param srcstat  src path and metadata
-     * @param tmpfile  temporary file to which copy is to be done
-     * @param absdst   actual destination path to which copy is to be done
-     * @param reporter
-     * @return Number of bytes copied
-     */
-    private long doCopyFile(FileStatus srcstat, Path tmpfile, Path absdst,
-                            Reporter reporter) throws IOException {
-      long bytesCopied = 0L;
-      Path srcPath = srcstat.getPath();
-      // open src file
-      try (FSDataInputStream in = srcPath.getFileSystem(job).open(srcPath)) {
-        reporter.incrCounter(Counter.BYTESEXPECTED, srcstat.getLen());
-        // open tmp file
-        try (FSDataOutputStream out = create(tmpfile, reporter, srcstat)) {
-          LOG.info("Copying file " + srcPath + " of size " +
-                   srcstat.getLen() + " bytes...");
-        
-          // copy file
-          for(int bytesRead; (bytesRead = in.read(buffer)) >= 0; ) {
-            out.write(buffer, 0, bytesRead);
-            bytesCopied += bytesRead;
-            reporter.setStatus(
-                String.format("%.2f ", bytesCopied*100.0/srcstat.getLen())
-                + absdst + " [ " +
-                TraditionalBinaryPrefix.long2String(bytesCopied, "", 1) + " / "
-                + TraditionalBinaryPrefix.long2String(srcstat.getLen(), "", 1)
-                + " ]");
-          }
-        }
-      }
-      return bytesCopied;
-    }
-    
-    /**
-     * Copy a file to a destination.
-     * @param srcstat src path and metadata
-     * @param relativedst relative dst path
-     * @param outc Log of skipped files
-     * @param reporter
-     * @throws IOException if copy fails(even if the validation of copy fails)
-     */
-    private void copy(FileStatus srcstat, Path relativedst,
-        OutputCollector<WritableComparable<?>, Text> outc, Reporter reporter)
-        throws IOException {
-      Path absdst = new Path(destPath, relativedst);
-      int totfiles = job.getInt(SRC_COUNT_LABEL, -1);
-      assert totfiles >= 0 : "Invalid file count " + totfiles;
-
-      if (totfiles == 1) {
-        // Copying a single file; use dst path provided by user as
-        // destination file rather than destination directory
-        Path dstparent = absdst.getParent();
-        if (!(destFileSys.exists(dstparent) &&
-              destFileSys.getFileStatus(dstparent).isDirectory())) {
-          absdst = dstparent;
-        }
-      }
-      
-      // if a directory, ensure created even if empty
-      if (srcstat.isDirectory()) {
-        if (destFileSys.exists(absdst)) {
-          if (destFileSys.getFileStatus(absdst).isFile()) {
-            throw new IOException("Failed to mkdirs: " + absdst+" is a file.");
-          }
-        }
-        else if (!destFileSys.mkdirs(absdst)) {
-          throw new IOException("Failed to mkdirs " + absdst);
-        }
-        // TODO: when modification times can be set, directories should be
-        // emitted to reducers so they might be preserved. Also, mkdirs does
-        // not currently return an error when the directory already exists;
-        // if this changes, all directory work might as well be done in reduce
-        return;
-      }
-
-      // Can we skip copying this file ?
-      if (skipCopyFile(srcstat, absdst, outc, reporter)) {
-        return;
-      }
-
-      Path tmpfile = new Path(job.get(TMP_DIR_LABEL), relativedst);
-      // do the actual copy to tmpfile
-      long bytesCopied = doCopyFile(srcstat, tmpfile, absdst, reporter);
-
-      if (bytesCopied != srcstat.getLen()) {
-        throw new IOException("File size not matched: copied "
-            + bytesString(bytesCopied) + " to tmpfile (=" + tmpfile
-            + ") but expected " + bytesString(srcstat.getLen()) 
-            + " from " + srcstat.getPath());        
-      }
-      else {
-        if (destFileSys.exists(absdst) &&
-            destFileSys.getFileStatus(absdst).isDirectory()) {
-          throw new IOException(absdst + " is a directory");
-        }
-        if (!destFileSys.mkdirs(absdst.getParent())) {
-          throw new IOException("Failed to create parent dir: " + absdst.getParent());
-        }
-        rename(tmpfile, absdst);
-
-        if (!validateCopy(srcstat, absdst)) {
-          destFileSys.delete(absdst, false);
-          throw new IOException("Validation of copy of file "
-              + srcstat.getPath() + " failed.");
-        } 
-        updateDestStatus(srcstat, destFileSys.getFileStatus(absdst));
-      }
-
-      // report at least once for each file
-      updateCopyStatus(srcstat, reporter);
-    }
-    
-    /** rename tmp to dst, delete dst if already exists */
-    private void rename(Path tmp, Path dst) throws IOException {
-      try {
-        if (destFileSys.exists(dst)) {
-          destFileSys.delete(dst, true);
-        }
-        if (!destFileSys.rename(tmp, dst)) {
-          throw new IOException();
-        }
-      }
-      catch(IOException cause) {
-        throw (IOException)new IOException("Fail to rename tmp file (=" + tmp 
-            + ") to destination file (=" + dst + ")").initCause(cause);
-      }
-    }
-
-    private void updateDestStatus(FileStatus src, FileStatus dst
-        ) throws IOException {
-      if (preserve_status) {
-        DistCpV1.updateDestStatus(src, dst, preseved, destFileSys);
-      }
-    }
-
-    static String bytesString(long b) {
-      return b + " bytes (" +
-          TraditionalBinaryPrefix.long2String(b, "", 1) + ")";
-    }
-
-    /**
-     * Copies a file and validates the copy by checking the checksums.
-     * If validation fails, retries (max number of tries is distcp.file.retries)
-     * to copy the file.
-     */
-    void copyWithRetries(FileStatus srcstat, Path relativedst,
-                         OutputCollector<WritableComparable<?>, Text> out,
-                         Reporter reporter) throws IOException {
-
-      // max tries to copy when validation of copy fails
-      final int maxRetries = job.getInt(FILE_RETRIES_LABEL, DEFAULT_FILE_RETRIES);
-      // save update flag for later copies within the same map task
-      final boolean saveUpdate = update;
-      
-      int retryCnt = 1;
-      for (; retryCnt <= maxRetries; retryCnt++) {
-        try {
-          //copy the file and validate copy
-          copy(srcstat, relativedst, out, reporter);
-          break;// copy successful
-        } catch (IOException e) {
-          LOG.warn("Copy of " + srcstat.getPath() + " failed.", e);
-          if (retryCnt < maxRetries) {// copy failed and need to retry
-            LOG.info("Retrying copy of file " + srcstat.getPath());
-            update = true; // set update flag for retries
-          }
-          else {// no more retries... Give up
-            update = saveUpdate;
-            throw new IOException("Copy of file failed even with " + retryCnt
-                                  + " tries.", e);
-          }
-        }
-      }
-    }
-    
-    /** Mapper configuration.
-     * Extracts source and destination file system, as well as
-     * top-level paths on source and destination directories.
-     * Gets the named file systems, to be used later in map.
-     */
-    public void configure(JobConf job)
-    {
-      destPath = new Path(job.get(DST_DIR_LABEL, "/"));
-      try {
-        destFileSys = destPath.getFileSystem(job);
-      } catch (IOException ex) {
-        throw new RuntimeException("Unable to get the named file system.", ex);
-      }
-      sizeBuf = job.getInt("copy.buf.size", 128 * 1024);
-      buffer = new byte[sizeBuf];
-      ignoreReadFailures = job.getBoolean(Options.IGNORE_READ_FAILURES.propertyname, false);
-      preserve_status = job.getBoolean(Options.PRESERVE_STATUS.propertyname, false);
-      if (preserve_status) {
-        preseved = FileAttribute.parse(job.get(PRESERVE_STATUS_LABEL));
-      }
-      update = job.getBoolean(Options.UPDATE.propertyname, false);
-      overwrite = !update && job.getBoolean(Options.OVERWRITE.propertyname, false);
-      skipCRCCheck = job.getBoolean(Options.SKIPCRC.propertyname, false);
-      this.job = job;
-    }
-
-    /** Map method. Copies one file from source file system to destination.
-     * @param key src len
-     * @param value FilePair (FileStatus src, Path dst)
-     * @param out Log of failed copies
-     * @param reporter
-     */
-    public void map(LongWritable key,
-                    FilePair value,
-                    OutputCollector<WritableComparable<?>, Text> out,
-                    Reporter reporter) throws IOException {
-      final FileStatus srcstat = value.input;
-      final Path relativedst = new Path(value.output);
-      try {
-        copyWithRetries(srcstat, relativedst, out, reporter);
-      } catch (IOException e) {
-        ++failcount;
-        reporter.incrCounter(Counter.FAIL, 1);
-        updateStatus(reporter);
-        final String sfailure = "FAIL " + relativedst + " : " +
-                          StringUtils.stringifyException(e);
-        out.collect(null, new Text(sfailure));
-        LOG.info(sfailure);
-        if (e instanceof FileNotFoundException) {
-          final String s = "Possible Cause for failure: Either the filesystem "
-                           + srcstat.getPath().getFileSystem(job)
-                           + " is not accessible or the file is deleted";
-          LOG.error(s);
-          out.collect(null, new Text(s));
-        }
-
-        try {
-          for (int i = 0; i < 3; ++i) {
-            try {
-              final Path tmp = new Path(job.get(TMP_DIR_LABEL), relativedst);
-              if (destFileSys.delete(tmp, true))
-                break;
-            } catch (Throwable ex) {
-              // ignore, we are just cleaning up
-              LOG.debug("Ignoring cleanup exception", ex);
-            }
-            // update status, so we don't get timed out
-            updateStatus(reporter);
-            Thread.sleep(3 * 1000);
-          }
-        } catch (InterruptedException inte) {
-          throw (IOException)new IOException().initCause(inte);
-        }
-      } finally {
-        updateStatus(reporter);
-      }
-    }
-
-    public void close() throws IOException {
-      if (0 == failcount || ignoreReadFailures) {
-        return;
-      }
-      throw new IOException(getCountString());
-    }
-  }
-
-  private static List<Path> fetchFileList(Configuration conf, Path srcList)
-      throws IOException {
-    List<Path> result = new ArrayList<Path>();
-    FileSystem fs = srcList.getFileSystem(conf);
-    try (BufferedReader input = new BufferedReader(new InputStreamReader(fs.open(srcList),
-            Charset.forName("UTF-8")))) {
-      String line = input.readLine();
-      while (line != null) {
-        result.add(new Path(line));
-        line = input.readLine();
-      }
-    }
-    return result;
-  }
-
-  @Deprecated
-  public static void copy(Configuration conf, String srcPath,
-                          String destPath, Path logPath,
-                          boolean srcAsList, boolean ignoreReadFailures)
-      throws IOException {
-    final Path src = new Path(srcPath);
-    List<Path> tmp = new ArrayList<Path>();
-    if (srcAsList) {
-      tmp.addAll(fetchFileList(conf, src));
-    } else {
-      tmp.add(src);
-    }
-    EnumSet<Options> flags = ignoreReadFailures
-      ? EnumSet.of(Options.IGNORE_READ_FAILURES)
-      : EnumSet.noneOf(Options.class);
-
-    final Path dst = new Path(destPath);
-    copy(conf, new Arguments(tmp, null, dst, logPath, flags, null,
-        Long.MAX_VALUE, Long.MAX_VALUE, null, false));
-  }
-
-  /** Sanity check for srcPath */
-  private static void checkSrcPath(JobConf jobConf, List<Path> srcPaths) 
-  throws IOException {
-    List<IOException> rslt = new ArrayList<IOException>();
-    List<Path> unglobbed = new LinkedList<Path>();
-    
-    Path[] ps = new Path[srcPaths.size()];
-    ps = srcPaths.toArray(ps);
-    TokenCache.obtainTokensForNamenodes(jobConf.getCredentials(), ps, jobConf);
-    
-    
-    for (Path p : srcPaths) {
-      FileSystem fs = p.getFileSystem(jobConf);
-      FileStatus[] inputs = fs.globStatus(p);
-      
-      if(inputs != null && inputs.length > 0) {
-        for (FileStatus onePath: inputs) {
-          unglobbed.add(onePath.getPath());
-        }
-      } else {
-        rslt.add(new IOException("Input source " + p + " does not exist."));
-      }
-    }
-    if (!rslt.isEmpty()) {
-      throw new InvalidInputException(rslt);
-    }
-    srcPaths.clear();
-    srcPaths.addAll(unglobbed);
-  }
-
-  /**
-   * Driver to copy srcPath to destPath depending on required protocol.
-   * @param conf configuration
-   * @param args arguments
-   */
-  static void copy(final Configuration conf, final Arguments args
-      ) throws IOException {
-    LOG.info("srcPaths=" + args.srcs);
-    if (!args.dryrun || args.flags.contains(Options.UPDATE)) {
-      LOG.info("destPath=" + args.dst);
-    }
-
-    JobConf job = createJobConf(conf);
-    
-    checkSrcPath(job, args.srcs);
-    if (args.preservedAttributes != null) {
-      job.set(PRESERVE_STATUS_LABEL, args.preservedAttributes);
-    }
-    if (args.mapredSslConf != null) {
-      job.set("dfs.https.client.keystore.resource", args.mapredSslConf);
-    }
-    
-    //Initialize the mapper
-    try {
-      if (setup(conf, job, args)) {
-        JobClient.runJob(job);
-      }
-      if(!args.dryrun) {
-        finalize(conf, job, args.dst, args.preservedAttributes);
-      }
-    } finally {
-      if (!args.dryrun) {
-        //delete tmp
-        fullyDelete(job.get(TMP_DIR_LABEL), job);
-      }
-      //delete jobDirectory
-      fullyDelete(job.get(JOB_DIR_LABEL), job);
-    }
-  }
-
-  private static void updateDestStatus(FileStatus src, FileStatus dst,
-      EnumSet<FileAttribute> preseved, FileSystem destFileSys
-      ) throws IOException {
-    String owner = null;
-    String group = null;
-    if (preseved.contains(FileAttribute.USER)
-        && !src.getOwner().equals(dst.getOwner())) {
-      owner = src.getOwner();
-    }
-    if (preseved.contains(FileAttribute.GROUP)
-        && !src.getGroup().equals(dst.getGroup())) {
-      group = src.getGroup();
-    }
-    if (owner != null || group != null) {
-      destFileSys.setOwner(dst.getPath(), owner, group);
-    }
-    if (preseved.contains(FileAttribute.PERMISSION)
-        && !src.getPermission().equals(dst.getPermission())) {
-      destFileSys.setPermission(dst.getPath(), src.getPermission());
-    }
-    if (preseved.contains(FileAttribute.TIMES)) {
-      destFileSys.setTimes(dst.getPath(), src.getModificationTime(), src.getAccessTime());
-    }
-  }
-
-  static private void finalize(Configuration conf, JobConf jobconf,
-      final Path destPath, String presevedAttributes) throws IOException {
-    if (presevedAttributes == null) {
-      return;
-    }
-    EnumSet<FileAttribute> preseved = FileAttribute.parse(presevedAttributes);
-    if (!preseved.contains(FileAttribute.USER)
-        && !preseved.contains(FileAttribute.GROUP)
-        && !preseved.contains(FileAttribute.PERMISSION)) {
-      return;
-    }
-
-    FileSystem dstfs = destPath.getFileSystem(conf);
-    Path dstdirlist = new Path(jobconf.get(DST_DIR_LIST_LABEL));
-    try (SequenceFile.Reader in =
-        new SequenceFile.Reader(jobconf, Reader.file(dstdirlist))) {
-      Text dsttext = new Text();
-      FilePair pair = new FilePair(); 
-      for(; in.next(dsttext, pair); ) {
-        Path absdst = new Path(destPath, pair.output);
-        updateDestStatus(pair.input, dstfs.getFileStatus(absdst),
-            preseved, dstfs);
-      }
-    }
-  }
-
-  static class Arguments {
-    final List<Path> srcs;
-    final Path basedir;
-    final Path dst;
-    final Path log;
-    final EnumSet<Options> flags;
-    final String preservedAttributes;
-    final long filelimit;
-    final long sizelimit;
-    final String mapredSslConf;
-    final boolean dryrun;
-    
-    /**
-     * Arguments for distcp
-     * @param srcs List of source paths
-     * @param basedir Base directory for copy
-     * @param dst Destination path
-     * @param log Log output directory
-     * @param flags Command-line flags
-     * @param preservedAttributes Preserved attributes 
-     * @param filelimit File limit
-     * @param sizelimit Size limit
-     * @param mapredSslConf ssl configuration
-     * @param dryrun Whether this is a dry run (no files are actually copied)
-     */
-    Arguments(List<Path> srcs, Path basedir, Path dst, Path log,
-        EnumSet<Options> flags, String preservedAttributes,
-        long filelimit, long sizelimit, String mapredSslConf,
-        boolean dryrun) {
-      this.srcs = srcs;
-      this.basedir = basedir;
-      this.dst = dst;
-      this.log = log;
-      this.flags = flags;
-      this.preservedAttributes = preservedAttributes;
-      this.filelimit = filelimit;
-      this.sizelimit = sizelimit;
-      this.mapredSslConf = mapredSslConf;
-      this.dryrun = dryrun;
-      
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("this = " + this);
-      }
-    }
-
-    static Arguments valueOf(String[] args, Configuration conf
-        ) throws IOException {
-      List<Path> srcs = new ArrayList<Path>();
-      Path dst = null;
-      Path log = null;
-      Path basedir = null;
-      EnumSet<Options> flags = EnumSet.noneOf(Options.class);
-      String presevedAttributes = null;
-      String mapredSslConf = null;
-      long filelimit = Long.MAX_VALUE;
-      long sizelimit = Long.MAX_VALUE;
-      boolean dryrun = false;
-
-      for (int idx = 0; idx < args.length; idx++) {
-        Options[] opt = Options.values();
-        int i = 0;
-        for(; i < opt.length && !args[idx].startsWith(opt[i].cmd); i++);
-
-        if (i < opt.length) {
-          flags.add(opt[i]);
-          if (opt[i] == Options.PRESERVE_STATUS) {
-            presevedAttributes =  args[idx].substring(2);         
-            FileAttribute.parse(presevedAttributes); //validation
-          }
-          else if (opt[i] == Options.FILE_LIMIT) {
-            filelimit = Options.FILE_LIMIT.parseLong(args, ++idx);
-          }
-          else if (opt[i] == Options.SIZE_LIMIT) {
-            sizelimit = Options.SIZE_LIMIT.parseLong(args, ++idx);
-          }
-        } else if ("-f".equals(args[idx])) {
-          if (++idx ==  args.length) {
-            throw new IllegalArgumentException("urilist_uri not specified in -f");
-          }
-          srcs.addAll(fetchFileList(conf, new Path(args[idx])));
-        } else if ("-log".equals(args[idx])) {
-          if (++idx ==  args.length) {
-            throw new IllegalArgumentException("logdir not specified in -log");
-          }
-          log = new Path(args[idx]);
-        } else if ("-basedir".equals(args[idx])) {
-          if (++idx ==  args.length) {
-            throw new IllegalArgumentException("basedir not specified in -basedir");
-          }
-          basedir = new Path(args[idx]);
-        } else if ("-mapredSslConf".equals(args[idx])) {
-          if (++idx ==  args.length) {
-            throw new IllegalArgumentException("ssl conf file not specified in -mapredSslConf");
-          }
-          mapredSslConf = args[idx];
-        } else if ("-dryrun".equals(args[idx])) {
-          dryrun = true;
-          dst = new Path("/tmp/distcp_dummy_dest");//dummy destination
-        } else if ("-m".equals(args[idx])) {
-          if (++idx == args.length) {
-            throw new IllegalArgumentException("num_maps not specified in -m");
-          }
-          try {
-            conf.setInt(MAX_MAPS_LABEL, Integer.parseInt(args[idx]));
-          } catch (NumberFormatException e) {
-            throw new IllegalArgumentException("Invalid argument to -m: " +
-                                               args[idx]);
-          }
-        } else if ('-' == args[idx].codePointAt(0)) {
-          throw new IllegalArgumentException("Invalid switch " + args[idx]);
-        } else if (idx == args.length -1 &&
-                   (!dryrun || flags.contains(Options.UPDATE))) {
-          dst = new Path(args[idx]);
-        } else {
-          srcs.add(new Path(args[idx]));
-        }
-      }
-      // mandatory command-line parameters
-      if (srcs.isEmpty() || dst == null) {
-        throw new IllegalArgumentException("Missing "
-            + (dst == null ? "dst path" : "src"));
-      }
-      // incompatible command-line flags
-      final boolean isOverwrite = flags.contains(Options.OVERWRITE);
-      final boolean isUpdate = flags.contains(Options.UPDATE);
-      final boolean isDelete = flags.contains(Options.DELETE);
-      final boolean skipCRC = flags.contains(Options.SKIPCRC);
-      if (isOverwrite && isUpdate) {
-        throw new IllegalArgumentException("Conflicting overwrite policies");
-      }
-      if (!isUpdate && skipCRC) {
-        throw new IllegalArgumentException(
-            Options.SKIPCRC.cmd + " is relevant only with the " +
-            Options.UPDATE.cmd + " option");
-      }
-      if (isDelete && !isOverwrite && !isUpdate) {
-        throw new IllegalArgumentException(Options.DELETE.cmd
-            + " must be specified with " + Options.OVERWRITE + " or "
-            + Options.UPDATE + ".");
-      }
-      return new Arguments(srcs, basedir, dst, log, flags, presevedAttributes,
-          filelimit, sizelimit, mapredSslConf, dryrun);
-    }
-    
-    /** {@inheritDoc} */
-    public String toString() {
-      return getClass().getName() + "{"
-          + "\n  srcs = " + srcs 
-          + "\n  dst = " + dst 
-          + "\n  log = " + log 
-          + "\n  flags = " + flags
-          + "\n  preservedAttributes = " + preservedAttributes 
-          + "\n  filelimit = " + filelimit 
-          + "\n  sizelimit = " + sizelimit
-          + "\n  mapredSslConf = " + mapredSslConf
-          + "\n}"; 
-    }
-  }
-
-  /**
-   * This is the main driver for recursively copying directories
-   * across file systems. It takes at least two cmdline parameters. A source
-   * URL and a destination URL. It then essentially does an "ls -lR" on the
-   * source URL, and writes the output in a round-robin manner to all the map
-   * input files. The mapper actually copies the files allotted to it. The
-   * reduce is empty.
-   */
-  public int run(String[] args) {
-    try {
-      copy(conf, Arguments.valueOf(args, conf));
-      return 0;
-    } catch (IllegalArgumentException e) {
-      System.err.println(StringUtils.stringifyException(e) + "\n" + usage);
-      ToolRunner.printGenericCommandUsage(System.err);
-      return -1;
-    } catch (DuplicationException e) {
-      System.err.println(StringUtils.stringifyException(e));
-      return DuplicationException.ERROR_CODE;
-    } catch (RemoteException e) {
-      final IOException unwrapped = e.unwrapRemoteException(
-          FileNotFoundException.class, 
-          AccessControlException.class,
-          QuotaExceededException.class);
-      System.err.println(StringUtils.stringifyException(unwrapped));
-      return -3;
-    } catch (Exception e) {
-      System.err.println("With failures, global counters are inaccurate; " +
-          "consider running with -i");
-      System.err.println("Copy failed: " + StringUtils.stringifyException(e));
-      return -999;
-    }
-  }
-
-  public static void main(String[] args) throws Exception {
-    JobConf job = new JobConf(DistCpV1.class);
-    DistCpV1 distcp = new DistCpV1(job);
-    int res = ToolRunner.run(distcp, args);
-    System.exit(res);
-  }
-
-  /**
-   * Make a path relative with respect to a root path.
-   * absPath is assumed to always descend from root;
-   * otherwise the returned path is null.
-   */
-  static String makeRelative(Path root, Path absPath) {
-    if (!absPath.isAbsolute()) {
-      throw new IllegalArgumentException("!absPath.isAbsolute(), absPath="
-          + absPath);
-    }
-    String p = absPath.toUri().getPath();
-
-    StringTokenizer pathTokens = new StringTokenizer(p, "/");
-    for(StringTokenizer rootTokens = new StringTokenizer(
-        root.toUri().getPath(), "/"); rootTokens.hasMoreTokens(); ) {
-      if (!rootTokens.nextToken().equals(pathTokens.nextToken())) {
-        return null;
-      }
-    }
-    StringBuilder sb = new StringBuilder();
-    for(; pathTokens.hasMoreTokens(); ) {
-      sb.append(pathTokens.nextToken());
-      if (pathTokens.hasMoreTokens()) { sb.append(Path.SEPARATOR); }
-    }
-    return sb.length() == 0? ".": sb.toString();
-  }
-
-  /**
-   * Calculate how many maps to run.
-   * The number of maps is the total bytes to copy divided by
-   * distcp.bytes.per.map (default BYTES_PER_MAP), capped at
-   * distcp.max.map.tasks (settable with -m on the command line, default
-   * MAX_MAPS_PER_NODE * task trackers in the cluster), with a minimum of 1.
-   * @param totalBytes Count of total bytes for job
-   * @param job The job to configure
-   * @return Count of maps to run.
-   */
-  private static int setMapCount(long totalBytes, JobConf job) 
-      throws IOException {
-    int numMaps =
-      (int)(totalBytes / job.getLong(BYTES_PER_MAP_LABEL, BYTES_PER_MAP));
-    numMaps = Math.min(numMaps, 
-        job.getInt(MAX_MAPS_LABEL, MAX_MAPS_PER_NODE *
-          new JobClient(job).getClusterStatus().getTaskTrackers()));
-    numMaps = Math.max(numMaps, 1);
-    job.setNumMapTasks(numMaps);
-    return numMaps;
-  }
-
-  /** Fully delete dir */
-  static void fullyDelete(String dir, Configuration conf) throws IOException {
-    if (dir != null) {
-      Path tmp = new Path(dir);
-      boolean success = tmp.getFileSystem(conf).delete(tmp, true);
-      if (!success) {
-        LOG.warn("Could not fully delete " + tmp);
-      }
-    }
-  }
-
-  //Job configuration
-  private static JobConf createJobConf(Configuration conf) {
-    JobConf jobconf = new JobConf(conf, DistCpV1.class);
-    jobconf.setJobName(conf.get("mapred.job.name", NAME));
-
-    // turn off speculative execution, because DFS doesn't handle
-    // multiple writers to the same file.
-    jobconf.setMapSpeculativeExecution(false);
-
-    jobconf.setInputFormat(CopyInputFormat.class);
-    jobconf.setOutputKeyClass(Text.class);
-    jobconf.setOutputValueClass(Text.class);
-
-    jobconf.setMapperClass(CopyFilesMapper.class);
-    jobconf.setNumReduceTasks(0);
-    return jobconf;
-  }
-
-  private static final Random RANDOM = new Random();
-  public static String getRandomId() {
-    return Integer.toString(RANDOM.nextInt(Integer.MAX_VALUE), 36);
-  }
-
-  /**
-   * Increase the replication factor of _distcp_src_files to
-   * sqrt(min(maxMapsOnCluster, numMaps)). This reduces the chance that
-   * distcp fails because some maps cannot find a replica of
-   * _distcp_src_files to read.
-   */
-  private static void setReplication(Configuration conf, JobConf jobConf,
-                         Path srcfilelist, int numMaps) throws IOException {
-    int numMaxMaps = new JobClient(jobConf).getClusterStatus().getMaxMapTasks();
-    short replication = (short) Math.ceil(
-                                Math.sqrt(Math.min(numMaxMaps, numMaps)));
-    FileSystem fs = srcfilelist.getFileSystem(conf);
-    FileStatus srcStatus = fs.getFileStatus(srcfilelist);
-
-    if (srcStatus.getReplication() < replication) {
-      if (!fs.setReplication(srcfilelist, replication)) {
-        throw new IOException("Unable to increase the replication of file " +
-                              srcfilelist);
-      }
-    }
-  }
-  
-  /**
-   * Does the dir already exist at the destination?
-   * @return true   if the dir already exists at destination
-   */
-  private static boolean dirExists(Configuration conf, Path dst)
-                 throws IOException {
-    FileSystem destFileSys = dst.getFileSystem(conf);
-    FileStatus status = null;
-    try {
-      status = destFileSys.getFileStatus(dst);
-    }catch (FileNotFoundException e) {
-      return false;
-    }
-    if (status.isFile()) {
-      throw new FileAlreadyExistsException("Not a dir: " + dst+" is a file.");
-    }
-    return true;
-  }
-  
-  /**
-   * Initialize DFSCopyFileMapper specific job-configuration.
-   * @param conf : The dfs/mapred configuration.
-   * @param jobConf : The handle to the jobConf object to be initialized.
-   * @param args Arguments
-   * @return true if it is necessary to launch a job.
-   */
-  static boolean setup(Configuration conf, JobConf jobConf,
-                            final Arguments args)
-      throws IOException {
-    jobConf.set(DST_DIR_LABEL, args.dst.toUri().toString());
-
-    //set boolean values
-    final boolean update = args.flags.contains(Options.UPDATE);
-    final boolean skipCRCCheck = args.flags.contains(Options.SKIPCRC);
-    final boolean overwrite = !update && args.flags.contains(Options.OVERWRITE)
-                              && !args.dryrun;
-    jobConf.setBoolean(Options.UPDATE.propertyname, update);
-    jobConf.setBoolean(Options.SKIPCRC.propertyname, skipCRCCheck);
-    jobConf.setBoolean(Options.OVERWRITE.propertyname, overwrite);
-    jobConf.setBoolean(Options.IGNORE_READ_FAILURES.propertyname,
-        args.flags.contains(Options.IGNORE_READ_FAILURES));
-    jobConf.setBoolean(Options.PRESERVE_STATUS.propertyname,
-        args.flags.contains(Options.PRESERVE_STATUS));
-
-    final String randomId = getRandomId();
-    JobClient jClient = new JobClient(jobConf);
-    Path stagingArea;
-    try {
-      stagingArea = 
-        JobSubmissionFiles.getStagingDir(jClient.getClusterHandle(), conf);
-    } catch (InterruptedException ie) {
-      throw new IOException(ie);
-    }
-    
-    Path jobDirectory = new Path(stagingArea + NAME + "_" + randomId);
-    FsPermission mapredSysPerms = 
-      new FsPermission(JobSubmissionFiles.JOB_DIR_PERMISSION);
-    FileSystem.mkdirs(jClient.getFs(), jobDirectory, mapredSysPerms);
-    jobConf.set(JOB_DIR_LABEL, jobDirectory.toString());
-
-    long maxBytesPerMap = conf.getLong(BYTES_PER_MAP_LABEL, BYTES_PER_MAP);
-
-    FileSystem dstfs = args.dst.getFileSystem(conf);
-    
-    // get tokens for all the required FileSystems..
-    TokenCache.obtainTokensForNamenodes(jobConf.getCredentials(), 
-                                        new Path[] {args.dst}, conf);
-    
-    
-    boolean dstExists = dstfs.exists(args.dst);
-    boolean dstIsDir = false;
-    if (dstExists) {
-      dstIsDir = dstfs.getFileStatus(args.dst).isDirectory();
-    }
-
-    // default logPath
-    Path logPath = args.log; 
-    if (logPath == null) {
-      String filename = "_distcp_logs_" + randomId;
-      if (!dstExists || !dstIsDir) {
-        Path parent = args.dst.getParent();
-        if (null == parent) {
-          // If dst is '/' on S3, it might not exist yet, but dst.getParent()
-          // will return null. In this case, use '/' as its own parent to prevent
-          // NPE errors below.
-          parent = args.dst;
-        }
-        if (!dstfs.exists(parent)) {
-          dstfs.mkdirs(parent);
-        }
-        logPath = new Path(parent, filename);
-      } else {
-        logPath = new Path(args.dst, filename);
-      }
-    }
-    FileOutputFormat.setOutputPath(jobConf, logPath);
-
-    // create src list, dst list
-    FileSystem jobfs = jobDirectory.getFileSystem(jobConf);
-
-    Path srcfilelist = new Path(jobDirectory, "_distcp_src_files");
-    Path dstfilelist = new Path(jobDirectory, "_distcp_dst_files");
-    Path dstdirlist = new Path(jobDirectory, "_distcp_dst_dirs");
-    jobConf.set(SRC_LIST_LABEL, srcfilelist.toString());
-    jobConf.set(DST_DIR_LIST_LABEL, dstdirlist.toString());
-    int srcCount = 0, cnsyncf = 0, dirsyn = 0;
-    long fileCount = 0L, dirCount = 0L, byteCount = 0L, cbsyncs = 0L,
-         skipFileCount = 0L, skipByteCount = 0L;
-    try (
-        SequenceFile.Writer src_writer = SequenceFile.createWriter(jobConf,
-            Writer.file(srcfilelist), Writer.keyClass(LongWritable.class),
-            Writer.valueClass(FilePair.class), Writer.compression(
-            SequenceFile.CompressionType.NONE));
-        SequenceFile.Writer dst_writer = SequenceFile.createWriter(jobConf,
-            Writer.file(dstfilelist), Writer.keyClass(Text.class),
-            Writer.valueClass(Text.class), Writer.compression(
-            SequenceFile.CompressionType.NONE));
-        SequenceFile.Writer dir_writer = SequenceFile.createWriter(jobConf,
-            Writer.file(dstdirlist), Writer.keyClass(Text.class),
-            Writer.valueClass(FilePair.class), Writer.compression(
-            SequenceFile.CompressionType.NONE));
-    ) {
-      // handle the case where the destination directory doesn't exist
-      // and we've only a single src directory OR we're updating/overwriting
-      // the contents of the destination directory.
-      final boolean special =
-        (args.srcs.size() == 1 && !dstExists) || update || overwrite;
-
-      Path basedir = null;
-      HashSet<Path> parentDirsToCopy = new HashSet<Path>();
-      if (args.basedir != null) {
-        FileSystem basefs = args.basedir.getFileSystem(conf);
-        basedir = args.basedir.makeQualified(
-            basefs.getUri(), basefs.getWorkingDirectory());
-        if (!basefs.isDirectory(basedir)) {
-          throw new IOException("Basedir " + basedir + " is not a directory.");
-        }
-      }
-
-      for(Iterator<Path> srcItr = args.srcs.iterator(); srcItr.hasNext(); ) {
-        final Path src = srcItr.next();
-        FileSystem srcfs = src.getFileSystem(conf);
-        FileStatus srcfilestat = srcfs.getFileStatus(src);
-        Path root = special && srcfilestat.isDirectory()? src: src.getParent();
-        if (dstExists && !dstIsDir &&
-            (args.srcs.size() > 1 || srcfilestat.isDirectory())) {
-          // destination should not be a file
-          throw new IOException("Destination " + args.dst + " should be a dir" +
-                                " if multiple source paths are there OR if" +
-                                " the source path is a dir");
-        }
-
-        if (basedir != null) {
-          root = basedir;
-          Path parent = src.getParent().makeQualified(
-              srcfs.getUri(), srcfs.getWorkingDirectory());
-          while (parent != null && !parent.equals(basedir)) {
-            if (!parentDirsToCopy.contains(parent)){
-              parentDirsToCopy.add(parent);
-              String dst = makeRelative(root, parent);
-              FileStatus pst = srcfs.getFileStatus(parent);
-              src_writer.append(new LongWritable(0), new FilePair(pst, dst));
-              dst_writer.append(new Text(dst), new Text(parent.toString()));
-              dir_writer.append(new Text(dst), new FilePair(pst, dst));
-              if (++dirsyn > SYNC_FILE_MAX) {
-                dirsyn = 0;
-                dir_writer.sync();                
-              }
-            }
-            parent = parent.getParent();
-          }
-          
-          if (parent == null) {
-            throw new IOException("Basedir " + basedir + 
-                " is not a prefix of source path " + src);
-          }
-        }
-        
-        if (srcfilestat.isDirectory()) {
-          ++srcCount;
-          final String dst = makeRelative(root,src);
-          if (!update || !dirExists(conf, new Path(args.dst, dst))) {
-            ++dirCount;
-            src_writer.append(new LongWritable(0),
-                              new FilePair(srcfilestat, dst));
-          }
-          dst_writer.append(new Text(dst), new Text(src.toString()));
-        }
-
-        Stack<FileStatus> pathstack = new Stack<FileStatus>();
-        for(pathstack.push(srcfilestat); !pathstack.empty(); ) {
-          FileStatus cur = pathstack.pop();
-          FileStatus[] children = srcfs.listStatus(cur.getPath());
-          for(int i = 0; i < children.length; i++) {
-            boolean skipPath = false;
-            final FileStatus child = children[i]; 
-            final String dst = makeRelative(root, child.getPath());
-            ++srcCount;
-
-            if (child.isDirectory()) {
-              pathstack.push(child);
-              if (!update || !dirExists(conf, new Path(args.dst, dst))) {
-                ++dirCount;
-              }
-              else {
-                skipPath = true; // skip creating dir at destination
-              }
-            }
-            else {
-              Path destPath = new Path(args.dst, dst);
-              if (cur.isFile() && (args.srcs.size() == 1)) {
-                // Copying a single file; use dst path provided by user as
-                // destination file rather than destination directory
-                Path dstparent = destPath.getParent();
-                FileSystem destFileSys = destPath.getFileSystem(jobConf);
-                if (!(destFileSys.exists(dstparent) &&
-                    destFileSys.getFileStatus(dstparent).isDirectory())) {
-                  destPath = dstparent;
-                }
-              }
-              //skip path if the src and the dst files are the same.
-              skipPath = update && 
-              	sameFile(srcfs, child, dstfs, destPath, skipCRCCheck);
-              //skip path if it exceed file limit or size limit
-              skipPath |= fileCount == args.filelimit
-                          || byteCount + child.getLen() > args.sizelimit; 
-
-              if (!skipPath) {
-                ++fileCount;
-                byteCount += child.getLen();
-
-                if (LOG.isTraceEnabled()) {
-                  LOG.trace("adding file " + child.getPath());
-                }
-
-                ++cnsyncf;
-                cbsyncs += child.getLen();
-                if (cnsyncf > SYNC_FILE_MAX || cbsyncs > maxBytesPerMap) {
-                  src_writer.sync();
-                  dst_writer.sync();
-                  cnsyncf = 0;
-                  cbsyncs = 0L;
-                }
-              }
-              else {
-                ++skipFileCount;
-                skipByteCount += child.getLen();
-                if (LOG.isTraceEnabled()) {
-                  LOG.trace("skipping file " + child.getPath());
-                }
-              }
-            }
-
-            if (!skipPath) {
-              src_writer.append(new LongWritable(child.isDirectory()? 0: child.getLen()),
-                  new FilePair(child, dst));
-            }
-
-            dst_writer.append(new Text(dst),
-                new Text(child.getPath().toString()));
-          }
-
-          if (cur.isDirectory()) {
-            String dst = makeRelative(root, cur.getPath());
-            dir_writer.append(new Text(dst), new FilePair(cur, dst));
-            if (++dirsyn > SYNC_FILE_MAX) {
-              dirsyn = 0;
-              dir_writer.sync();                
-            }
-          }
-        }
-      }
-    }
-    LOG.info("sourcePathsCount(files+directories)=" + srcCount);
-    LOG.info("filesToCopyCount=" + fileCount);
-    LOG.info("bytesToCopyCount=" +
-             TraditionalBinaryPrefix.long2String(byteCount, "", 1));
-    if (update) {
-      LOG.info("filesToSkipCopyCount=" + skipFileCount);
-      LOG.info("bytesToSkipCopyCount=" +
-               TraditionalBinaryPrefix.long2String(skipByteCount, "", 1));
-    }
-    if (args.dryrun) {
-      return false;
-    }
-    int mapCount = setMapCount(byteCount, jobConf);
-    // Increase the replication of _distcp_src_files, if needed
-    setReplication(conf, jobConf, srcfilelist, mapCount);
-    
-    FileStatus dststatus = null;
-    try {
-      dststatus = dstfs.getFileStatus(args.dst);
-    } catch(FileNotFoundException fnfe) {
-      LOG.info(args.dst + " does not exist.");
-    }
-
-    // create dest path dir if copying > 1 file
-    if (dststatus == null) {
-      if (srcCount > 1 && !dstfs.mkdirs(args.dst)) {
-        throw new IOException("Failed to create " + args.dst);
-      }
-    }
-    
-    final Path sorted = new Path(jobDirectory, "_distcp_sorted"); 
-    checkDuplication(jobfs, dstfilelist, sorted, conf);
-
-    if (dststatus != null && args.flags.contains(Options.DELETE)) {
-      long deletedPathsCount = deleteNonexisting(dstfs, dststatus, sorted,
-          jobfs, jobDirectory, jobConf, conf);
-      LOG.info("deletedPathsFromDestCount(files+directories)=" +
-               deletedPathsCount);
-    }
-
-    Path tmpDir = new Path(
-        (dstExists && !dstIsDir) || (!dstExists && srcCount == 1)?
-        args.dst.getParent(): args.dst, "_distcp_tmp_" + randomId);
-    jobConf.set(TMP_DIR_LABEL, tmpDir.toUri().toString());
-
-    // Explicitly create the tmpDir to ensure that it can be cleaned
-    // up by fullyDelete() later.
-    tmpDir.getFileSystem(conf).mkdirs(tmpDir);
-
-    LOG.info("sourcePathsCount=" + srcCount);
-    LOG.info("filesToCopyCount=" + fileCount);
-    LOG.info("bytesToCopyCount=" +
-             TraditionalBinaryPrefix.long2String(byteCount, "", 1));
-    jobConf.setInt(SRC_COUNT_LABEL, srcCount);
-    jobConf.setLong(TOTAL_SIZE_LABEL, byteCount);
-    
-    return (fileCount + dirCount) > 0;
-  }
-
-  /**
-   * Check whether the contents of src and dst are the same.
-   * 
-   * Return false if dstpath does not exist
-   * 
-   * If the files have different sizes, return false.
-   * 
-   * If the files have the same sizes, the file checksums will be compared.
-   * 
-   * When file checksums are not supported by either file system,
-   * two files are considered the same if they have the same size.
-   */
-  static private boolean sameFile(FileSystem srcfs, FileStatus srcstatus,
-      FileSystem dstfs, Path dstpath, boolean skipCRCCheck) throws IOException {
-    FileStatus dststatus;
-    try {
-      dststatus = dstfs.getFileStatus(dstpath);
-    } catch(FileNotFoundException fnfe) {
-      return false;
-    }
-
-    //same length?
-    if (srcstatus.getLen() != dststatus.getLen()) {
-      return false;
-    }
-
-    if (skipCRCCheck) {
-      LOG.debug("Skipping the CRC check");
-      return true;
-    }
-    
-    //get src checksum
-    final FileChecksum srccs;
-    try {
-      srccs = srcfs.getFileChecksum(srcstatus.getPath());
-    } catch(FileNotFoundException fnfe) {
-      /*
-       * Two possible cases:
-       * (1) src existed once but was deleted between the time period that
-       *     srcstatus was obtained and the try block above.
-       * (2) srcfs does not support file checksum and (incorrectly) throws
-       *     FNFE, e.g. some previous versions of HftpFileSystem.
-       * For case (1), it is okay to return true since src was already deleted.
-       * For case (2), true should be returned.  
-       */
-      return true;
-    }
-
-    //compare checksums
-    try {
-      final FileChecksum dstcs = dstfs.getFileChecksum(dststatus.getPath());
-      //return true if checksum is not supported
-      //(i.e. at least one of the checksums is null)
-      return srccs == null || dstcs == null || srccs.equals(dstcs);
-    } catch(FileNotFoundException fnfe) {
-      return false;
-    }
-  }
-  
-  /**
-   * Delete the dst files/dirs which do not exist in src
-   * 
-   * @return total count of files and directories deleted from destination
-   * @throws IOException
-   */
-  static private long deleteNonexisting(
-      FileSystem dstfs, FileStatus dstroot, Path dstsorted,
-      FileSystem jobfs, Path jobdir, JobConf jobconf, Configuration conf
-      ) throws IOException {
-    if (dstroot.isFile()) {
-      throw new IOException("dst must be a directory when option "
-          + Options.DELETE.cmd + " is set, but dst (= " + dstroot.getPath()
-          + ") is not a directory.");
-    }
-
-    //write dst lsr results
-    final Path dstlsr = new Path(jobdir, "_distcp_dst_lsr");
-    try (final SequenceFile.Writer writer = SequenceFile.createWriter(jobconf,
-        Writer.file(dstlsr), Writer.keyClass(Text.class),
-        Writer.valueClass(NullWritable.class), Writer.compression(
-        SequenceFile.CompressionType.NONE))) {
-      //do lsr to get all file statuses in dstroot
-      final Stack<FileStatus> lsrstack = new Stack<FileStatus>();
-      for(lsrstack.push(dstroot); !lsrstack.isEmpty(); ) {
-        final FileStatus status = lsrstack.pop();
-        if (status.isDirectory()) {
-          for(FileStatus child : dstfs.listStatus(status.getPath())) {
-            String relative = makeRelative(dstroot.getPath(), child.getPath());
-            writer.append(new Text(relative), NullWritable.get());
-            lsrstack.push(child);
-          }
-        }
-      }
-    }
-
-    //sort lsr results
-    final Path sortedlsr = new Path(jobdir, "_distcp_dst_lsr_sorted");
-    SequenceFile.Sorter sorter = new SequenceFile.Sorter(jobfs,
-        new Text.Comparator(), Text.class, NullWritable.class, jobconf);
-    sorter.sort(dstlsr, sortedlsr);
-
-    //compare lsr list and dst list  
-    long deletedPathsCount = 0;
-    try (SequenceFile.Reader lsrin =
-             new SequenceFile.Reader(jobconf, Reader.file(sortedlsr));
-         SequenceFile.Reader  dstin =
-             new SequenceFile.Reader(jobconf, Reader.file(dstsorted))) {
-      //compare sorted lsr list and sorted dst list
-      final Text lsrpath = new Text();
-      final Text dstpath = new Text();
-      final Text dstfrom = new Text();
-      final Trash trash = new Trash(dstfs, conf);
-      Path lastpath = null;
-
-      boolean hasnext = dstin.next(dstpath, dstfrom);
-      while (lsrin.next(lsrpath, NullWritable.get())) {
-        int dst_cmp_lsr = dstpath.compareTo(lsrpath);
-        while (hasnext && dst_cmp_lsr < 0) {
-          hasnext = dstin.next(dstpath, dstfrom);
-          dst_cmp_lsr = dstpath.compareTo(lsrpath);
-        }
-        
-        if (dst_cmp_lsr == 0) {
-          //lsrpath exists in dst, skip it
-          hasnext = dstin.next(dstpath, dstfrom);
-        } else {
-          //lsrpath does not exist, delete it
-          final Path rmpath = new Path(dstroot.getPath(), lsrpath.toString());
-          ++deletedPathsCount;
-          if ((lastpath == null || !isAncestorPath(lastpath, rmpath))) {
-            if (!(trash.moveToTrash(rmpath) || dstfs.delete(rmpath, true))) {
-              throw new IOException("Failed to delete " + rmpath);
-            }
-            lastpath = rmpath;
-          }
-        }
-      }
-    }
-    return deletedPathsCount;
-  }
-
-  //is x an ancestor path of y?
-  static private boolean isAncestorPath(Path xp, Path yp) {
-    final String x = xp.toString();
-    final String y = yp.toString();
-    if (!y.startsWith(x)) {
-      return false;
-    }
-    final int len = x.length();
-    return y.length() == len || y.charAt(len) == Path.SEPARATOR_CHAR;  
-  }
-  
-  /** Check whether the file list has duplicates. */
-  static private void checkDuplication(FileSystem fs, Path file, Path sorted,
-    Configuration conf) throws IOException {
-    SequenceFile.Sorter sorter = new SequenceFile.Sorter(fs,
-      new Text.Comparator(), Text.class, Text.class, conf);
-    sorter.sort(file, sorted);
-    try (SequenceFile.Reader in =
-         new SequenceFile.Reader(conf, Reader.file(sorted))) {
-      Text prevdst = null, curdst = new Text();
-      Text prevsrc = null, cursrc = new Text(); 
-      for(; in.next(curdst, cursrc); ) {
-        if (prevdst != null && curdst.equals(prevdst)) {
-          throw new DuplicationException(
-            "Invalid input, there are duplicated files in the sources: "
-            + prevsrc + ", " + cursrc);
-        }
-        prevdst = curdst;
-        curdst = new Text();
-        prevsrc = cursrc;
-        cursrc = new Text();
-      }
-    }
-  } 
-
-  /** An exception class for duplicated source files. */
-  public static class DuplicationException extends IOException {
-    private static final long serialVersionUID = 1L;
-    /** Error code for this exception */
-    public static final int ERROR_CODE = -2;
-    DuplicationException(String message) {super(message);}
-  }
-}
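
For reference, the map-count bound applied by the removed DistCpV1.setMapCount() can be sketched as a standalone helper. This is a minimal sketch, not the shipped code: the JobConf/JobClient lookups are replaced by plain parameters, and the sample numbers are illustrative only.

  // Sketch of: numMaps = max(1, min(totalBytes / bytesPerMap, maxMaps)),
  // the bound DistCpV1.setMapCount() derives from distcp.bytes.per.map
  // and distcp.max.map.tasks before calling job.setNumMapTasks().
  public final class MapCountSketch {
    private MapCountSketch() {}

    static int mapCount(long totalBytes, long bytesPerMap, int maxMaps) {
      int numMaps = (int) (totalBytes / bytesPerMap);
      numMaps = Math.min(numMaps, maxMaps);
      return Math.max(numMaps, 1);
    }

    public static void main(String[] args) {
      long totalBytes = 10L * 1024 * 1024 * 1024; // 10 GB to copy
      long bytesPerMap = 256L * 1024 * 1024;      // 256 MB per map
      int maxMaps = 20;                           // cluster-wide cap
      // 10 GB / 256 MB = 40 candidate maps, capped to 20 by maxMaps.
      System.out.println(mapCount(totalBytes, bytesPerMap, maxMaps)); // prints 20
    }
  }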

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aa730ce/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/Logalyzer.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/Logalyzer.java b/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/Logalyzer.java
deleted file mode 100644
index 05e6e24..0000000
--- a/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/Logalyzer.java
+++ /dev/null
@@ -1,329 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.tools;
-
-import java.io.ByteArrayInputStream;
-import java.io.DataInputStream;
-import java.io.IOException;
-import java.nio.charset.Charset;
-import java.util.Random;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configuration.DeprecationDelta;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.WritableComparable;
-import org.apache.hadoop.io.WritableComparator;
-import org.apache.hadoop.mapred.FileInputFormat;
-import org.apache.hadoop.mapred.FileOutputFormat;
-import org.apache.hadoop.mapred.JobClient;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.MapReduceBase;
-import org.apache.hadoop.mapred.Mapper;
-import org.apache.hadoop.mapred.OutputCollector;
-import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.mapred.TextInputFormat;
-import org.apache.hadoop.mapred.TextOutputFormat;
-import org.apache.hadoop.mapred.lib.LongSumReducer;
-import org.apache.hadoop.mapreduce.lib.map.RegexMapper;
-
-/**
- * Logalyzer: A utility tool for archiving and analyzing hadoop logs.
- * <p>
- * This tool supports archiving and analyzing (sort/grep) of log-files.
- * It takes as input
- *  a) Input uri which will serve uris of the logs to be archived.
- *  b) Output directory (not mandatory).
- *  c) Directory on dfs to archive the logs.
- *  d) The sort/grep patterns for analyzing the files and separator for boundaries.
- * Usage: 
- * Logalyzer -archive -archiveDir &lt;directory to archive logs&gt; -analysis
- * &lt;directory&gt; -logs &lt;log-list uri&gt; -grep &lt;pattern&gt; -sort
- * &lt;col1, col2&gt; -separator &lt;separator&gt;
- * <p>
- */
-@Deprecated
-public class Logalyzer {
-  // Constants
-  private static Configuration fsConfig = new Configuration();
-  public static final String SORT_COLUMNS = 
-    "logalizer.logcomparator.sort.columns";
-  public static final String COLUMN_SEPARATOR = 
-    "logalizer.logcomparator.column.separator";
-  
-  static {
-    Configuration.addDeprecations(new DeprecationDelta[] {
-      new DeprecationDelta("mapred.reducer.sort", SORT_COLUMNS),
-      new DeprecationDelta("mapred.reducer.separator", COLUMN_SEPARATOR)
-    });
-  }
-
-  /** A {@link Mapper} that extracts text matching a regular expression. */
-  public static class LogRegexMapper<K extends WritableComparable>
-    extends MapReduceBase
-    implements Mapper<K, Text, Text, LongWritable> {
-    
-    private Pattern pattern;
-    
-    public void configure(JobConf job) {
-      pattern = Pattern.compile(job.get(RegexMapper.PATTERN));
-    }
-    
-    public void map(K key, Text value,
-                    OutputCollector<Text, LongWritable> output,
-                    Reporter reporter)
-      throws IOException {
-      String text = value.toString();
-      Matcher matcher = pattern.matcher(text);
-      while (matcher.find()) {
-        output.collect(value, new LongWritable(1));
-      }
-    }
-    
-  }
-  
-  /** A WritableComparator optimized for UTF8 keys of the logs. */
-  public static class LogComparator extends Text.Comparator implements Configurable {
-    
-    private static Log LOG = LogFactory.getLog(Logalyzer.class);
-    private JobConf conf = null;
-    private String[] sortSpec = null;
-    private String columnSeparator = null;
-    
-    public void setConf(Configuration conf) {
-      if (conf instanceof JobConf) {
-        this.conf = (JobConf) conf;
-      } else {
-        this.conf = new JobConf(conf);
-      }
-      
-      //Initialize the specification for *comparison*
-      String sortColumns = this.conf.get(SORT_COLUMNS, null);
-      if (sortColumns != null) {
-        sortSpec = sortColumns.split(",");
-      }
-      
-      //Column-separator
-      columnSeparator = this.conf.get(COLUMN_SEPARATOR, "");
-    }
-    
-    public Configuration getConf() {
-      return conf;
-    }
-    
-    public int compare(byte[] b1, int s1, int l1,
-                       byte[] b2, int s2, int l2) {
-      
-      if (sortSpec == null) {
-        return super.compare(b1, s1, l1, b2, s2, l2);
-      }
-      
-      try {
-        Text logline1 = new Text(); 
-        logline1.readFields(new DataInputStream(new ByteArrayInputStream(b1, s1, l1)));
-        String line1 = logline1.toString();
-        String[] logColumns1 = line1.split(columnSeparator);
-        
-        Text logline2 = new Text(); 
-        logline2.readFields(new DataInputStream(new ByteArrayInputStream(b2, s2, l2)));
-        String line2 = logline2.toString();
-        String[] logColumns2 = line2.split(columnSeparator);
-        
-        if (logColumns1 == null || logColumns2 == null) {
-          return super.compare(b1, s1, l1, b2, s2, l2);
-        }
-        
-        //Compare column-wise according to *sortSpec*
-        for(int i=0; i < sortSpec.length; ++i) {
-          int column = Integer.parseInt(sortSpec[i]);
-          String c1 = logColumns1[column]; 
-          String c2 = logColumns2[column];
-          
-          //Compare columns
-          int comparision = super.compareBytes(
-                                  c1.getBytes(Charset.forName("UTF-8")), 0, c1.length(),
-                                  c2.getBytes(Charset.forName("UTF-8")), 0, c2.length()
-                                  );
-          
-          //They differ!
-          if (comparision != 0) {
-            return comparision;
-          }
-        }
-        
-      } catch (IOException ioe) {
-        LOG.fatal("Caught " + ioe);
-        return 0;
-      }
-      
-      return 0;
-    }
-    
-    static {                                        
-      // register this comparator
-      WritableComparator.define(Text.class, new LogComparator());
-    }
-  }
-  
-  /**
-   * doArchive: Workhorse function to archive log-files.
-   * @param logListURI : The uri which will serve list of log-files to archive.
-   * @param archiveDirectory : The directory to store archived logfiles.
-   * @throws IOException
-   */
-  @SuppressWarnings("deprecation")
-  public void	
-    doArchive(String logListURI, String archiveDirectory)
-    throws IOException
-  {
-    String destURL = FileSystem.getDefaultUri(fsConfig) + archiveDirectory;
-    DistCpV1.copy(new JobConf(fsConfig), logListURI, destURL, null, true, false);
-  }
-  
-  /**
-   * doAnalyze: 
-   * @param inputFilesDirectory : Directory containing the files to be analyzed.
-   * @param outputDirectory : Directory to store analysis (output).
-   * @param grepPattern : Pattern to *grep* for.
-   * @param sortColumns : Sort specification for output.
-   * @param columnSeparator : Column separator.
-   * @throws IOException
-   */
-  public void
-    doAnalyze(String inputFilesDirectory, String outputDirectory,
-              String grepPattern, String sortColumns, String columnSeparator)
-    throws IOException
-  {		
-    Path grepInput = new Path(inputFilesDirectory);
-    
-    Path analysisOutput = null;
-    if (outputDirectory.equals("")) {
-      analysisOutput =  new Path(inputFilesDirectory, "logalyzer_" + 
-                                 Integer.toString(new Random().nextInt(Integer.MAX_VALUE)));
-    } else {
-      analysisOutput = new Path(outputDirectory);
-    }
-    
-    JobConf grepJob = new JobConf(fsConfig);
-    grepJob.setJobName("logalyzer-grep-sort");
-    
-    FileInputFormat.setInputPaths(grepJob, grepInput);
-    grepJob.setInputFormat(TextInputFormat.class);
-    
-    grepJob.setMapperClass(LogRegexMapper.class);
-    grepJob.set(RegexMapper.PATTERN, grepPattern);
-    grepJob.set(SORT_COLUMNS, sortColumns);
-    grepJob.set(COLUMN_SEPARATOR, columnSeparator);
-    
-    grepJob.setCombinerClass(LongSumReducer.class);
-    grepJob.setReducerClass(LongSumReducer.class);
-    
-    FileOutputFormat.setOutputPath(grepJob, analysisOutput);
-    grepJob.setOutputFormat(TextOutputFormat.class);
-    grepJob.setOutputKeyClass(Text.class);
-    grepJob.setOutputValueClass(LongWritable.class);
-    grepJob.setOutputKeyComparatorClass(LogComparator.class);
-    
-    grepJob.setNumReduceTasks(1);                 // write a single file
-    
-    JobClient.runJob(grepJob);
-  }
-  
-  public static void main(String[] args) {
-    
-    Log LOG = LogFactory.getLog(Logalyzer.class);
-    
-    String version = "Logalyzer.0.0.1";
-    String usage = "Usage: Logalyzer [-archive -logs <urlsFile>] " +
-      "-archiveDir <archiveDirectory> " +
-      "-grep <pattern> -sort <column1,column2,...> -separator <separator> " +
-      "-analysis <outputDirectory>";
-    
-    System.out.println(version);
-    if (args.length == 0) {
-      System.err.println(usage);
-      System.exit(-1);
-    }
-    
-    //Command line arguments
-    boolean archive = false;
-    boolean grep = false;
-    boolean sort = false;
-    
-    String archiveDir = "";
-    String logListURI = "";
-    String grepPattern = ".*";
-    String sortColumns = "";
-    String columnSeparator = " ";
-    String outputDirectory = "";
-    
-    for (int i = 0; i < args.length; i++) { // parse command line
-      if (args[i].equals("-archive")) {
-        archive = true;
-      } else if (args[i].equals("-archiveDir")) {
-        archiveDir = args[++i];
-      } else if (args[i].equals("-grep")) {
-        grep = true;
-        grepPattern = args[++i];
-      } else if (args[i].equals("-logs")) {
-        logListURI = args[++i];
-      } else if (args[i].equals("-sort")) {
-        sort = true;
-        sortColumns = args[++i];
-      } else if (args[i].equals("-separator")) {
-        columnSeparator = args[++i];
-      } else if (args[i].equals("-analysis")) {
-        outputDirectory = args[++i];
-      }
-    }
-    
-    LOG.info("analysisDir = " + outputDirectory);
-    LOG.info("archiveDir = " + archiveDir);
-    LOG.info("logListURI = " + logListURI);
-    LOG.info("grepPattern = " + grepPattern);
-    LOG.info("sortColumns = " + sortColumns);
-    LOG.info("separator = " + columnSeparator);
-    
-    try {
-      Logalyzer logalyzer = new Logalyzer();
-      
-      // Archive?
-      if (archive) {
-        logalyzer.doArchive(logListURI, archiveDir);
-      }
-      
-      // Analyze?
-      if (grep || sort) {
-        logalyzer.doAnalyze(archiveDir, outputDirectory, grepPattern, sortColumns, columnSeparator);
-      }
-    } catch (IOException ioe) {
-      ioe.printStackTrace();
-      System.exit(-1);
-    }
-    
-  } //main
-  
-} //class Logalyzer
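
The column-wise ordering implemented by Logalyzer.LogComparator above amounts to: split each log line on the configured separator, compare the columns listed in the sort spec in order, and fall back to ordinary ordering when the spec does not decide. Below is a minimal sketch of that idea as a plain java.util.Comparator on Strings; the real comparator works on serialized Text bytes inside MapReduce, and the sample lines and the whole-line tie-break here are illustrative additions.

  import java.util.Arrays;
  import java.util.Comparator;
  import java.util.List;

  // Minimal sketch of Logalyzer's column-wise comparison; not the actual
  // LogComparator class.
  public final class ColumnSortSketch {
    public static void main(String[] args) {
      String separator = " ";
      int[] sortSpec = {1, 0};  // compare column 1 first, then column 0

      Comparator<String> byColumns = (line1, line2) -> {
        String[] c1 = line1.split(separator);
        String[] c2 = line2.split(separator);
        for (int column : sortSpec) {
          int cmp = c1[column].compareTo(c2[column]);
          if (cmp != 0) {
            return cmp;
          }
        }
        return line1.compareTo(line2);  // tie-break on the whole line
      };

      List<String> lines = Arrays.asList(
          "host2 ERROR disk full",
          "host1 WARN slow heartbeat",
          "host3 ERROR checksum mismatch");
      lines.sort(byColumns);
      // ERROR lines sort before WARN; host2 before host3 within ERROR.
      lines.forEach(System.out::println);
    }
  }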


[10/50] [abbrv] hadoop git commit: HDFS-8394. Move getAdditionalBlock() and related functionalities into a separate class. Contributed by Haohui Mai.

Posted by ji...@apache.org.
HDFS-8394. Move getAdditionalBlock() and related functionalities into a separate class. Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e5afac58
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e5afac58
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e5afac58

Branch: refs/heads/HDFS-7240
Commit: e5afac5896a1a88e152746598527d91f73cbb724
Parents: 8f37873
Author: Haohui Mai <wh...@apache.org>
Authored: Fri May 15 19:09:59 2015 -0700
Committer: Haohui Mai <wh...@apache.org>
Committed: Fri May 15 19:09:59 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../hdfs/server/namenode/FSDirWriteFileOp.java  | 563 +++++++++++++++++++
 .../hdfs/server/namenode/FSDirectory.java       |  78 +--
 .../hdfs/server/namenode/FSEditLogLoader.java   |   3 +-
 .../hdfs/server/namenode/FSNamesystem.java      | 497 ++--------------
 .../hdfs/server/namenode/NameNodeRpcServer.java |  30 +-
 .../hdfs/server/namenode/TestAddBlockRetry.java |  30 +-
 .../TestCommitBlockSynchronization.java         |   3 +
 8 files changed, 648 insertions(+), 559 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5afac58/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 35e81f9..4a33987 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -557,6 +557,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8397. Refactor the error handling code in DataStreamer.
     (Tsz Wo Nicholas Sze via jing9)
 
+    HDFS-8394. Move getAdditionalBlock() and related functionalities into a
+    separate class. (wheat9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
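
The new FSDirWriteFileOp below follows the shape this change describes: a non-instantiable class of static helpers that receive the FSDirectory/FSNamesystem they operate on as explicit parameters, while the former instance methods become thin delegations. A schematic of that pattern, with made-up names and state rather than the actual Hadoop classes:

  // Illustrative only: "Namesystem" and "WriteFileOp" stand in for
  // FSNamesystem and FSDirWriteFileOp; the real methods and fields differ.
  class Namesystem {
    private long lastBlockId;

    // Before the refactoring this logic would live here as an instance
    // method; afterwards the method is a thin delegation to the helper.
    long allocateBlockId() {
      return WriteFileOp.allocateBlockId(this);
    }

    long getLastBlockId() { return lastBlockId; }
    void setLastBlockId(long id) { lastBlockId = id; }
  }

  final class WriteFileOp {
    private WriteFileOp() {}  // non-instantiable, like FSDirWriteFileOp

    // The helper takes the state it needs as an explicit parameter, which
    // keeps the dependency direction visible and the logic easier to test.
    static long allocateBlockId(Namesystem ns) {
      long next = ns.getLastBlockId() + 1;
      ns.setLastBlockId(next);
      return next;
    }
  }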

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5afac58/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
new file mode 100644
index 0000000..1ff0899
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -0,0 +1,563 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.net.Node;
+import org.apache.hadoop.net.NodeBase;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+class FSDirWriteFileOp {
+  private FSDirWriteFileOp() {}
+  static boolean unprotectedRemoveBlock(
+      FSDirectory fsd, String path, INodesInPath iip, INodeFile fileNode,
+      Block block) throws IOException {
+    // modify file-> block and blocksMap
+    // fileNode should be under construction
+    BlockInfoContiguousUnderConstruction uc = fileNode.removeLastBlock(block);
+    if (uc == null) {
+      return false;
+    }
+    fsd.getBlockManager().removeBlockFromMap(block);
+
+    if(NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("DIR* FSDirectory.removeBlock: "
+          +path+" with "+block
+          +" block is removed from the file system");
+    }
+
+    // update space consumed
+    fsd.updateCount(iip, 0, -fileNode.getPreferredBlockSize(),
+                    fileNode.getPreferredBlockReplication(), true);
+    return true;
+  }
+
+  /**
+   * Persist the block list for the inode.
+   */
+  static void persistBlocks(
+      FSDirectory fsd, String path, INodeFile file, boolean logRetryCache) {
+    assert fsd.getFSNamesystem().hasWriteLock();
+    Preconditions.checkArgument(file.isUnderConstruction());
+    fsd.getEditLog().logUpdateBlocks(path, file, logRetryCache);
+    if(NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("persistBlocks: " + path
+              + " with " + file.getBlocks().length + " blocks is persisted to" +
+              " the file system");
+    }
+  }
+
+  static void abandonBlock(
+      FSDirectory fsd, FSPermissionChecker pc, ExtendedBlock b, long fileId,
+      String src, String holder) throws IOException {
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
+    src = fsd.resolvePath(pc, src, pathComponents);
+
+    final INode inode;
+    final INodesInPath iip;
+    if (fileId == HdfsConstants.GRANDFATHER_INODE_ID) {
+      // Older clients may not have given us an inode ID to work with.
+      // In this case, we have to try to resolve the path and hope it
+      // hasn't changed or been deleted since the file was opened for write.
+      iip = fsd.getINodesInPath(src, true);
+      inode = iip.getLastINode();
+    } else {
+      inode = fsd.getInode(fileId);
+      iip = INodesInPath.fromINode(inode);
+      if (inode != null) {
+        src = iip.getPath();
+      }
+    }
+    FSNamesystem fsn = fsd.getFSNamesystem();
+    final INodeFile file = fsn.checkLease(src, holder, inode, fileId);
+    Preconditions.checkState(file.isUnderConstruction());
+
+    Block localBlock = ExtendedBlock.getLocalBlock(b);
+    fsd.writeLock();
+    try {
+      // Remove the block from the pending creates list
+      if (!unprotectedRemoveBlock(fsd, src, iip, file, localBlock)) {
+        return;
+      }
+    } finally {
+      fsd.writeUnlock();
+    }
+    persistBlocks(fsd, src, file, false);
+  }
+
+  static void checkBlock(FSNamesystem fsn, ExtendedBlock block)
+      throws IOException {
+    String bpId = fsn.getBlockPoolId();
+    if (block != null && !bpId.equals(block.getBlockPoolId())) {
+      throw new IOException("Unexpected BlockPoolId " + block.getBlockPoolId()
+          + " - expected " + bpId);
+    }
+  }
+
+  /**
+   * Part I of getAdditionalBlock().
+   * Analyze the state of the file under read lock to determine if the client
+   * can add a new block, detect potential retries, lease mismatches,
+   * and minimal replication of the penultimate block.
+   *
+   * Generate target DataNode locations for the new block,
+   * but do not create the new block yet.
+   */
+  static ValidateAddBlockResult validateAddBlock(
+      FSNamesystem fsn, FSPermissionChecker pc,
+      String src, long fileId, String clientName,
+      ExtendedBlock previous, LocatedBlock[] onRetryBlock) throws IOException {
+    final long blockSize;
+    final int replication;
+    final byte storagePolicyID;
+    String clientMachine;
+
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
+    src = fsn.dir.resolvePath(pc, src, pathComponents);
+    FileState fileState = analyzeFileState(fsn, src, fileId, clientName,
+                                           previous, onRetryBlock);
+    final INodeFile pendingFile = fileState.inode;
+    // Check if the penultimate block is minimally replicated
+    if (!fsn.checkFileProgress(src, pendingFile, false)) {
+      throw new NotReplicatedYetException("Not replicated yet: " + src);
+    }
+
+    if (onRetryBlock[0] != null && onRetryBlock[0].getLocations().length > 0) {
+      // This is a retry. No need to generate new locations.
+      // Use the last block if it has locations.
+      return null;
+    }
+    if (pendingFile.getBlocks().length >= fsn.maxBlocksPerFile) {
+      throw new IOException("File has reached the limit on maximum number of"
+          + " blocks (" + DFSConfigKeys.DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY
+          + "): " + pendingFile.getBlocks().length + " >= "
+          + fsn.maxBlocksPerFile);
+    }
+    blockSize = pendingFile.getPreferredBlockSize();
+    clientMachine = pendingFile.getFileUnderConstructionFeature()
+        .getClientMachine();
+    replication = pendingFile.getFileReplication();
+    storagePolicyID = pendingFile.getStoragePolicyID();
+    return new ValidateAddBlockResult(blockSize, replication, storagePolicyID,
+                                    clientMachine);
+  }
+
+  static LocatedBlock makeLocatedBlock(FSNamesystem fsn, Block blk,
+      DatanodeStorageInfo[] locs, long offset) throws IOException {
+    LocatedBlock lBlk = BlockManager.newLocatedBlock(fsn.getExtendedBlock(blk),
+                                                     locs, offset, false);
+    fsn.getBlockManager().setBlockToken(lBlk,
+                                        BlockTokenIdentifier.AccessMode.WRITE);
+    return lBlk;
+  }
+
+  /**
+   * Part II of getAdditionalBlock().
+   * Should repeat the same analysis of the file state as in Part 1,
+   * but under the write lock.
+   * If the conditions still hold, then allocate a new block with
+   * the new targets, add it to the INode and to the BlocksMap.
+   */
+  static LocatedBlock storeAllocatedBlock(FSNamesystem fsn, String src,
+      long fileId, String clientName, ExtendedBlock previous,
+      DatanodeStorageInfo[] targets) throws IOException {
+    long offset;
+    // Run the full analysis again, since things could have changed
+    // while chooseTarget() was executing.
+    LocatedBlock[] onRetryBlock = new LocatedBlock[1];
+    FileState fileState = analyzeFileState(fsn, src, fileId, clientName,
+                                           previous, onRetryBlock);
+    final INodeFile pendingFile = fileState.inode;
+    src = fileState.path;
+
+    if (onRetryBlock[0] != null) {
+      if (onRetryBlock[0].getLocations().length > 0) {
+        // This is a retry. Just return the last block if having locations.
+        return onRetryBlock[0];
+      } else {
+        // add new chosen targets to already allocated block and return
+        BlockInfoContiguous lastBlockInFile = pendingFile.getLastBlock();
+        ((BlockInfoContiguousUnderConstruction) lastBlockInFile)
+            .setExpectedLocations(targets);
+        offset = pendingFile.computeFileSize();
+        return makeLocatedBlock(fsn, lastBlockInFile, targets, offset);
+      }
+    }
+
+    // commit the last block and complete it if it has minimum replicas
+    fsn.commitOrCompleteLastBlock(pendingFile, fileState.iip,
+                                  ExtendedBlock.getLocalBlock(previous));
+
+    // allocate new block, record block locations in INode.
+    Block newBlock = fsn.createNewBlock();
+    INodesInPath inodesInPath = INodesInPath.fromINode(pendingFile);
+    saveAllocatedBlock(fsn, src, inodesInPath, newBlock, targets);
+
+    persistNewBlock(fsn, src, pendingFile);
+    offset = pendingFile.computeFileSize();
+
+    // Return located block
+    return makeLocatedBlock(fsn, newBlock, targets, offset);
+  }
+
+  static DatanodeStorageInfo[] chooseTargetForNewBlock(
+      BlockManager bm, String src, DatanodeInfo[] excludedNodes, String[]
+      favoredNodes, ValidateAddBlockResult r) throws IOException {
+    Node clientNode = bm.getDatanodeManager()
+        .getDatanodeByHost(r.clientMachine);
+    if (clientNode == null) {
+      clientNode = getClientNode(bm, r.clientMachine);
+    }
+
+    Set<Node> excludedNodesSet = null;
+    if (excludedNodes != null) {
+      excludedNodesSet = new HashSet<>(excludedNodes.length);
+      Collections.addAll(excludedNodesSet, excludedNodes);
+    }
+    List<String> favoredNodesList = (favoredNodes == null) ? null
+        : Arrays.asList(favoredNodes);
+
+    // choose targets for the new block to be allocated.
+    return bm.chooseTarget4NewBlock(src, r.replication, clientNode,
+                                    excludedNodesSet, r.blockSize,
+                                    favoredNodesList, r.storagePolicyID);
+  }
+
+  /**
+   * Resolve clientmachine address to get a network location path
+   */
+  static Node getClientNode(BlockManager bm, String clientMachine) {
+    List<String> hosts = new ArrayList<>(1);
+    hosts.add(clientMachine);
+    List<String> rName = bm.getDatanodeManager()
+        .resolveNetworkLocation(hosts);
+    Node clientNode = null;
+    if (rName != null) {
+      // Able to resolve clientMachine mapping.
+      // Create a temp node to find out the rack-local nodes
+      clientNode = new NodeBase(rName.get(0) + NodeBase.PATH_SEPARATOR_STR
+          + clientMachine);
+    }
+    return clientNode;
+  }
+
+  /**
+   * Add a block to the file. Returns a reference to the added block.
+   */
+  private static BlockInfoContiguous addBlock(
+      FSDirectory fsd, String path, INodesInPath inodesInPath, Block block,
+      DatanodeStorageInfo[] targets) throws IOException {
+    fsd.writeLock();
+    try {
+      final INodeFile fileINode = inodesInPath.getLastINode().asFile();
+      Preconditions.checkState(fileINode.isUnderConstruction());
+
+      // check quota limits and updated space consumed
+      fsd.updateCount(inodesInPath, 0, fileINode.getPreferredBlockSize(),
+          fileINode.getPreferredBlockReplication(), true);
+
+      // associate new last block for the file
+      BlockInfoContiguousUnderConstruction blockInfo =
+        new BlockInfoContiguousUnderConstruction(
+            block,
+            fileINode.getFileReplication(),
+            HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION,
+            targets);
+      fsd.getBlockManager().addBlockCollection(blockInfo, fileINode);
+      fileINode.addBlock(blockInfo);
+
+      if(NameNode.stateChangeLog.isDebugEnabled()) {
+        NameNode.stateChangeLog.debug("DIR* FSDirectory.addBlock: "
+            + path + " with " + block
+            + " block is added to the in-memory "
+            + "file system");
+      }
+      return blockInfo;
+    } finally {
+      fsd.writeUnlock();
+    }
+  }
+
+  private static FileState analyzeFileState(
+      FSNamesystem fsn, String src, long fileId, String clientName,
+      ExtendedBlock previous, LocatedBlock[] onRetryBlock)
+          throws IOException  {
+    assert fsn.hasReadLock();
+
+    checkBlock(fsn, previous);
+    onRetryBlock[0] = null;
+    fsn.checkNameNodeSafeMode("Cannot add block to " + src);
+
+    // have we exceeded the configured limit of fs objects.
+    fsn.checkFsObjectLimit();
+
+    Block previousBlock = ExtendedBlock.getLocalBlock(previous);
+    final INode inode;
+    final INodesInPath iip;
+    if (fileId == HdfsConstants.GRANDFATHER_INODE_ID) {
+      // Older clients may not have given us an inode ID to work with.
+      // In this case, we have to try to resolve the path and hope it
+      // hasn't changed or been deleted since the file was opened for write.
+      iip = fsn.dir.getINodesInPath4Write(src);
+      inode = iip.getLastINode();
+    } else {
+      // Newer clients pass the inode ID, so we can just get the inode
+      // directly.
+      inode = fsn.dir.getInode(fileId);
+      iip = INodesInPath.fromINode(inode);
+      if (inode != null) {
+        src = iip.getPath();
+      }
+    }
+    final INodeFile file = fsn.checkLease(src, clientName,
+                                                 inode, fileId);
+    BlockInfoContiguous lastBlockInFile = file.getLastBlock();
+    if (!Block.matchingIdAndGenStamp(previousBlock, lastBlockInFile)) {
+      // The block that the client claims is the current last block
+      // doesn't match up with what we think is the last block. There are
+      // four possibilities:
+      // 1) This is the first block allocation of an append() pipeline
+      //    which started appending exactly at or exceeding the block boundary.
+      //    In this case, the client isn't passed the previous block,
+      //    so it makes the allocateBlock() call with previous=null.
+      //    We can distinguish this since the last block of the file
+      //    will be exactly a full block.
+      // 2) This is a retry from a client that missed the response of a
+      //    prior getAdditionalBlock() call, perhaps because of a network
+      //    timeout, or because of an HA failover. In that case, we know
+      //    by the fact that the client is re-issuing the RPC that it
+      //    never began to write to the old block. Hence it is safe to
+      //    return the existing block.
+      // 3) This is an entirely bogus request/bug -- we should error out
+      //    rather than potentially appending a new block with an empty
+      //    one in the middle, etc
+      // 4) This is a retry from a client that timed out while
+      //    the prior getAdditionalBlock() is still being processed,
+      //    currently working on chooseTarget().
+      //    There are no means to distinguish between the first and
+      //    the second attempts in Part I, because the first one hasn't
+      //    changed the namesystem state yet.
+      //    We run this analysis again in Part II where case 4 is impossible.
+
+      BlockInfoContiguous penultimateBlock = file.getPenultimateBlock();
+      if (previous == null &&
+          lastBlockInFile != null &&
+          lastBlockInFile.getNumBytes() >= file.getPreferredBlockSize() &&
+          lastBlockInFile.isComplete()) {
+        // Case 1
+        if (NameNode.stateChangeLog.isDebugEnabled()) {
+           NameNode.stateChangeLog.debug(
+               "BLOCK* NameSystem.allocateBlock: handling block allocation" +
+               " writing to a file with a complete previous block: src=" +
+               src + " lastBlock=" + lastBlockInFile);
+        }
+      } else if (Block.matchingIdAndGenStamp(penultimateBlock, previousBlock)) {
+        if (lastBlockInFile.getNumBytes() != 0) {
+          throw new IOException(
+              "Request looked like a retry to allocate block " +
+              lastBlockInFile + " but it already contains " +
+              lastBlockInFile.getNumBytes() + " bytes");
+        }
+
+        // Case 2
+        // Return the last block.
+        NameNode.stateChangeLog.info("BLOCK* allocateBlock: caught retry for " +
+            "allocation of a new block in " + src + ". Returning previously" +
+            " allocated block " + lastBlockInFile);
+        long offset = file.computeFileSize();
+        BlockInfoContiguousUnderConstruction lastBlockUC =
+            (BlockInfoContiguousUnderConstruction) lastBlockInFile;
+        onRetryBlock[0] = makeLocatedBlock(fsn, lastBlockInFile,
+            lastBlockUC.getExpectedStorageLocations(), offset);
+        return new FileState(file, src, iip);
+      } else {
+        // Case 3
+        throw new IOException("Cannot allocate block in " + src + ": " +
+            "passed 'previous' block " + previous + " does not match actual " +
+            "last block in file " + lastBlockInFile);
+      }
+    }
+    return new FileState(file, src, iip);
+  }
+
+  static boolean completeFile(FSNamesystem fsn, FSPermissionChecker pc,
+      final String srcArg, String holder, ExtendedBlock last, long fileId)
+      throws IOException {
+    String src = srcArg;
+    if (NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("DIR* NameSystem.completeFile: " +
+                                        src + " for " + holder);
+    }
+    checkBlock(fsn, last);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
+    src = fsn.dir.resolvePath(pc, src, pathComponents);
+    boolean success = completeFileInternal(fsn, src, holder,
+                                           ExtendedBlock.getLocalBlock(last),
+                                           fileId);
+    if (success) {
+      NameNode.stateChangeLog.info("DIR* completeFile: " + srcArg
+                                       + " is closed by " + holder);
+    }
+    return success;
+  }
+
+  private static boolean completeFileInternal(
+      FSNamesystem fsn, String src, String holder, Block last, long fileId)
+      throws IOException {
+    assert fsn.hasWriteLock();
+    final INodeFile pendingFile;
+    final INodesInPath iip;
+    INode inode = null;
+    try {
+      if (fileId == HdfsConstants.GRANDFATHER_INODE_ID) {
+        // Older clients may not have given us an inode ID to work with.
+        // In this case, we have to try to resolve the path and hope it
+        // hasn't changed or been deleted since the file was opened for write.
+        iip = fsn.dir.getINodesInPath(src, true);
+        inode = iip.getLastINode();
+      } else {
+        inode = fsn.dir.getInode(fileId);
+        iip = INodesInPath.fromINode(inode);
+        if (inode != null) {
+          src = iip.getPath();
+        }
+      }
+      pendingFile = fsn.checkLease(src, holder, inode, fileId);
+    } catch (LeaseExpiredException lee) {
+      if (inode != null && inode.isFile() &&
+          !inode.asFile().isUnderConstruction()) {
+        // This could be a retry RPC - i.e the client tried to close
+        // the file, but missed the RPC response. Thus, it is trying
+        // again to close the file. If the file still exists and
+        // the client's view of the last block matches the actual
+        // last block, then we'll treat it as a successful close.
+        // See HDFS-3031.
+        final Block realLastBlock = inode.asFile().getLastBlock();
+        if (Block.matchingIdAndGenStamp(last, realLastBlock)) {
+          NameNode.stateChangeLog.info("DIR* completeFile: " +
+              "request from " + holder + " to complete inode " + fileId +
+              "(" + src + ") which is already closed. But, it appears to be " +
+              "an RPC retry. Returning success");
+          return true;
+        }
+      }
+      throw lee;
+    }
+    // Check the state of the penultimate block. It should be completed
+    // before attempting to complete the last one.
+    if (!fsn.checkFileProgress(src, pendingFile, false)) {
+      return false;
+    }
+
+    // commit the last block and complete it if it has minimum replicas
+    fsn.commitOrCompleteLastBlock(pendingFile, iip, last);
+
+    if (!fsn.checkFileProgress(src, pendingFile, true)) {
+      return false;
+    }
+
+    fsn.finalizeINodeFileUnderConstruction(src, pendingFile,
+        Snapshot.CURRENT_STATE_ID);
+    return true;
+  }
+
+  /**
+   * Persist the new block (the last block of the given file).
+   */
+  private static void persistNewBlock(
+      FSNamesystem fsn, String path, INodeFile file) {
+    Preconditions.checkArgument(file.isUnderConstruction());
+    fsn.getEditLog().logAddBlock(path, file);
+    if (NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("persistNewBlock: "
+              + path + " with new block " + file.getLastBlock().toString()
+              + ", current total block count is " + file.getBlocks().length);
+    }
+  }
+
+  /**
+   * Save allocated block at the given pending filename
+   *
+   * @param fsn FSNamesystem
+   * @param src path to the file
+   * @param inodesInPath representing each of the components of src.
+   *                     The last INode is the INode for {@code src} file.
+   * @param newBlock newly allocated block to be saved
+   * @param targets target datanodes where replicas of the new block are placed
+   * @throws QuotaExceededException If addition of block exceeds space quota
+   */
+  private static void saveAllocatedBlock(
+      FSNamesystem fsn, String src, INodesInPath inodesInPath, Block newBlock,
+      DatanodeStorageInfo[] targets)
+      throws IOException {
+    assert fsn.hasWriteLock();
+    BlockInfoContiguous b = addBlock(fsn.dir, src, inodesInPath, newBlock,
+                                     targets);
+    NameNode.stateChangeLog.info("BLOCK* allocate " + b + " for " + src);
+    DatanodeStorageInfo.incrementBlocksScheduled(targets);
+  }
+
+  private static class FileState {
+    final INodeFile inode;
+    final String path;
+    final INodesInPath iip;
+
+    FileState(INodeFile inode, String fullPath, INodesInPath iip) {
+      this.inode = inode;
+      this.path = fullPath;
+      this.iip = iip;
+    }
+  }
+
+  static class ValidateAddBlockResult {
+    final long blockSize;
+    final int replication;
+    final byte storagePolicyID;
+    final String clientMachine;
+
+    ValidateAddBlockResult(
+        long blockSize, int replication, byte storagePolicyID,
+        String clientMachine) {
+      this.blockSize = blockSize;
+      this.replication = replication;
+      this.storagePolicyID = storagePolicyID;
+      this.clientMachine = clientMachine;
+    }
+  }
+}
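
The Part I / Part II javadoc in the new FSDirWriteFileOp above describes the
locking discipline behind getAdditionalBlock(): validate the file state under
the read lock, choose the target DataNodes with no namesystem lock held, then
repeat the validation and commit under the write lock. The stand-alone sketch
below illustrates only that pattern, using plain java.util.concurrent types;
it is not Hadoop code, and every class, field and method name in it is
invented for the illustration.

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Illustration of the Part I / Part II pattern: cheap validation under
    // the read lock, expensive work (in HDFS, chooseTarget4NewBlock) with no
    // lock held, then the same validation again under the write lock before
    // committing, because the state may have changed in between.
    public class TwoPhaseAllocationSketch {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
      private long lastBlockId = 0;   // stands in for the file's last block

      public long allocate(long expectedLastBlockId) {
        // "Part I": validate under the read lock.
        lock.readLock().lock();
        try {
          if (expectedLastBlockId != lastBlockId) {
            throw new IllegalStateException("client view of the file is stale");
          }
        } finally {
          lock.readLock().unlock();
        }

        // Expensive placement decision with no lock held.
        long candidate = lastBlockId + 1;

        // "Part II": repeat the analysis under the write lock, then commit.
        lock.writeLock().lock();
        try {
          if (expectedLastBlockId != lastBlockId) {
            throw new IllegalStateException("file changed while choosing targets");
          }
          lastBlockId = candidate;
          return candidate;
        } finally {
          lock.writeLock().unlock();
        }
      }

      public static void main(String[] args) {
        TwoPhaseAllocationSketch sketch = new TwoPhaseAllocationSketch();
        System.out.println("allocated block " + sketch.allocate(0));
      }
    }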
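
The case analysis in analyzeFileState() hinges on Block.matchingIdAndGenStamp():
a client that missed the response to an earlier getAdditionalBlock() call
re-sends the same "previous" block, so it matches the penultimate block on
record while the already-allocated last block still holds no bytes (case 2).
A minimal sketch of that check, assuming hadoop-hdfs is on the classpath; the
block IDs, lengths and generation stamps below are made up for the example.

    import org.apache.hadoop.hdfs.protocol.Block;

    public class RetryCheckSketch {
      public static void main(String[] args) {
        // The client's view: the block it believes is the last in the file.
        Block previousFromClient = new Block(1001L, 134217728L, 2001L);
        // What the NameNode has on record: that block is now penultimate,
        // because a new last block was already allocated by the first RPC.
        Block penultimate = new Block(1001L, 134217728L, 2001L);
        Block lastAllocated = new Block(1002L, 0L, 2002L);

        // Case 2: id and generation stamp match the penultimate block and the
        // last block is still empty, so the request is treated as a retry and
        // the previously allocated block is returned instead of a new one.
        boolean looksLikeRetry =
            Block.matchingIdAndGenStamp(penultimate, previousFromClient)
                && lastAllocated.getNumBytes() == 0;
        System.out.println("retry detected: " + looksLikeRetry);
      }
    }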

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5afac58/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 1583815..c2ed956 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -55,12 +55,9 @@ import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.util.ByteArray;
 import org.apache.hadoop.hdfs.util.EnumCounters;
@@ -308,7 +305,7 @@ public class FSDirectory implements Closeable {
     return namesystem;
   }
 
-  private BlockManager getBlockManager() {
+  BlockManager getBlockManager() {
     return getFSNamesystem().getBlockManager();
   }
 
@@ -479,79 +476,6 @@ public class FSDirectory implements Closeable {
   }
 
   /**
-   * Add a block to the file. Returns a reference to the added block.
-   */
-  BlockInfoContiguous addBlock(String path, INodesInPath inodesInPath,
-      Block block, DatanodeStorageInfo[] targets) throws IOException {
-    writeLock();
-    try {
-      final INodeFile fileINode = inodesInPath.getLastINode().asFile();
-      Preconditions.checkState(fileINode.isUnderConstruction());
-
-      // check quota limits and updated space consumed
-      updateCount(inodesInPath, 0, fileINode.getPreferredBlockSize(),
-          fileINode.getPreferredBlockReplication(), true);
-
-      // associate new last block for the file
-      BlockInfoContiguousUnderConstruction blockInfo =
-        new BlockInfoContiguousUnderConstruction(
-            block,
-            fileINode.getFileReplication(),
-            BlockUCState.UNDER_CONSTRUCTION,
-            targets);
-      getBlockManager().addBlockCollection(blockInfo, fileINode);
-      fileINode.addBlock(blockInfo);
-
-      if(NameNode.stateChangeLog.isDebugEnabled()) {
-        NameNode.stateChangeLog.debug("DIR* FSDirectory.addBlock: "
-            + path + " with " + block
-            + " block is added to the in-memory "
-            + "file system");
-      }
-      return blockInfo;
-    } finally {
-      writeUnlock();
-    }
-  }
-
-  /**
-   * Remove a block from the file.
-   * @return Whether the block exists in the corresponding file
-   */
-  boolean removeBlock(String path, INodesInPath iip, INodeFile fileNode,
-      Block block) throws IOException {
-    Preconditions.checkArgument(fileNode.isUnderConstruction());
-    writeLock();
-    try {
-      return unprotectedRemoveBlock(path, iip, fileNode, block);
-    } finally {
-      writeUnlock();
-    }
-  }
-  
-  boolean unprotectedRemoveBlock(String path, INodesInPath iip,
-      INodeFile fileNode, Block block) throws IOException {
-    // modify file-> block and blocksMap
-    // fileNode should be under construction
-    BlockInfoContiguousUnderConstruction uc = fileNode.removeLastBlock(block);
-    if (uc == null) {
-      return false;
-    }
-    getBlockManager().removeBlockFromMap(block);
-
-    if(NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* FSDirectory.removeBlock: "
-          +path+" with "+block
-          +" block is removed from the file system");
-    }
-
-    // update space consumed
-    updateCount(iip, 0, -fileNode.getPreferredBlockSize(),
-        fileNode.getPreferredBlockReplication(), true);
-    return true;
-  }
-
-  /**
    * This is a wrapper for resolvePath(). If the path passed
    * is prefixed with /.reserved/raw, then it checks to ensure that the caller
    * has super user privileges.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5afac58/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index f75c117..dec1298 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -1037,7 +1037,8 @@ public class FSEditLogLoader {
             + path);
       }
       Block oldBlock = oldBlocks[oldBlocks.length - 1];
-      boolean removed = fsDir.unprotectedRemoveBlock(path, iip, file, oldBlock);
+      boolean removed = FSDirWriteFileOp.unprotectedRemoveBlock(
+          fsDir, path, iip, file, oldBlock);
       if (!removed && !(op instanceof UpdateBlocksOp)) {
         throw new IOException("Trying to delete non-existant block " + oldBlock);
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5afac58/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 4d82fab..0fec5ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -268,7 +268,6 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
-import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
@@ -484,7 +483,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   private final long maxFsObjects;          // maximum number of fs objects
 
   private final long minBlockSize;         // minimum block size
-  private final long maxBlocksPerFile;     // maximum # of blocks per file
+  final long maxBlocksPerFile;     // maximum # of blocks per file
 
   // precision of access times.
   private final long accessTimePrecision;
@@ -614,7 +613,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   boolean isHaEnabled() {
     return haEnabled;
   }
-  
+
   /**
    * Check the supplied configuration for correctness.
    * @param conf Supplies the configuration to validate.
@@ -1863,8 +1862,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
             : dir.getFileEncryptionInfo(inode, iip.getPathSnapshotId(), iip);
 
     final LocatedBlocks blocks = blockManager.createLocatedBlocks(
-        inode.getBlocks(iip.getPathSnapshotId()), fileSize,
-        isUc, offset, length, needBlockToken, iip.isSnapshot(), feInfo);
+        inode.getBlocks(iip.getPathSnapshotId()), fileSize, isUc, offset,
+        length, needBlockToken, iip.isSnapshot(), feInfo);
 
     // Set caching information for the located blocks.
     for (LocatedBlock lb : blocks.getLocatedBlocks()) {
@@ -2232,8 +2231,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot set storage policy for " + src);
-      auditStat = FSDirAttrOp.setStoragePolicy(
-          dir, blockManager, src, policyName);
+      auditStat = FSDirAttrOp.setStoragePolicy(dir, blockManager, src,
+                                               policyName);
     } catch (AccessControlException e) {
       logAuditEvent(false, "setStoragePolicy", src);
       throw e;
@@ -2621,7 +2620,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       return toRemoveBlocks;
     } catch (IOException ie) {
       NameNode.stateChangeLog.warn("DIR* NameSystem.startFile: " + src + " " +
-          ie.getMessage());
+                                       ie.getMessage());
       throw ie;
     }
   }
@@ -2703,8 +2702,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
             "Cannot append to lazy persist file " + src);
       }
       // Opening an existing file for append - may need to recover lease.
-      recoverLeaseInternal(RecoverLeaseOp.APPEND_FILE,
-          iip, src, holder, clientMachine, false);
+      recoverLeaseInternal(RecoverLeaseOp.APPEND_FILE, iip, src, holder,
+                           clientMachine, false);
       
       final BlockInfoContiguous lastBlock = myFile.getLastBlock();
       // Check that the block has at least minimum replication.
@@ -3042,290 +3041,49 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    * are replicated.  Will return an empty 2-elt array if we want the
    * client to "try again later".
    */
-  LocatedBlock getAdditionalBlock(String src, long fileId, String clientName,
-      ExtendedBlock previous, Set<Node> excludedNodes, 
-      List<String> favoredNodes) throws IOException {
-    LocatedBlock[] onRetryBlock = new LocatedBlock[1];
-    DatanodeStorageInfo targets[] = getNewBlockTargets(src, fileId,
-        clientName, previous, excludedNodes, favoredNodes, onRetryBlock);
-    if (targets == null) {
-      assert onRetryBlock[0] != null : "Retry block is null";
-      // This is a retry. Just return the last block.
-      return onRetryBlock[0];
-    }
-    LocatedBlock newBlock = storeAllocatedBlock(
-        src, fileId, clientName, previous, targets);
-    return newBlock;
-  }
-
-  /**
-   * Part I of getAdditionalBlock().
-   * Analyze the state of the file under read lock to determine if the client
-   * can add a new block, detect potential retries, lease mismatches,
-   * and minimal replication of the penultimate block.
-   * 
-   * Generate target DataNode locations for the new block,
-   * but do not create the new block yet.
-   */
-  DatanodeStorageInfo[] getNewBlockTargets(String src, long fileId,
-      String clientName, ExtendedBlock previous, Set<Node> excludedNodes,
-      List<String> favoredNodes, LocatedBlock[] onRetryBlock) throws IOException {
-    final long blockSize;
-    final int replication;
-    final byte storagePolicyID;
-    Node clientNode = null;
-    String clientMachine = null;
-
+  LocatedBlock getAdditionalBlock(
+      String src, long fileId, String clientName, ExtendedBlock previous,
+      DatanodeInfo[] excludedNodes, String[] favoredNodes) throws IOException {
     if(NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("BLOCK* getAdditionalBlock: "
           + src + " inodeId " +  fileId  + " for " + clientName);
     }
 
-    checkOperation(OperationCategory.READ);
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
+    waitForLoadingFSImage();
+    LocatedBlock[] onRetryBlock = new LocatedBlock[1];
+    FSDirWriteFileOp.ValidateAddBlockResult r;
     FSPermissionChecker pc = getPermissionChecker();
+    checkOperation(OperationCategory.READ);
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      src = dir.resolvePath(pc, src, pathComponents);
-      FileState fileState = analyzeFileState(
-          src, fileId, clientName, previous, onRetryBlock);
-      final INodeFile pendingFile = fileState.inode;
-      // Check if the penultimate block is minimally replicated
-      if (!checkFileProgress(src, pendingFile, false)) {
-        throw new NotReplicatedYetException("Not replicated yet: " + src);
-      }
-      src = fileState.path;
-
-      if (onRetryBlock[0] != null && onRetryBlock[0].getLocations().length > 0) {
-        // This is a retry. No need to generate new locations.
-        // Use the last block if it has locations.
-        return null;
-      }
-      if (pendingFile.getBlocks().length >= maxBlocksPerFile) {
-        throw new IOException("File has reached the limit on maximum number of"
-            + " blocks (" + DFSConfigKeys.DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY
-            + "): " + pendingFile.getBlocks().length + " >= "
-            + maxBlocksPerFile);
-      }
-      blockSize = pendingFile.getPreferredBlockSize();
-      clientMachine = pendingFile.getFileUnderConstructionFeature()
-          .getClientMachine();
-      clientNode = blockManager.getDatanodeManager().getDatanodeByHost(
-          clientMachine);
-      replication = pendingFile.getFileReplication();
-      storagePolicyID = pendingFile.getStoragePolicyID();
+      r = FSDirWriteFileOp.validateAddBlock(this, pc, src, fileId, clientName,
+                                            previous, onRetryBlock);
     } finally {
       readUnlock();
     }
 
-    if (clientNode == null) {
-      clientNode = getClientNode(clientMachine);
+    if (r == null) {
+      assert onRetryBlock[0] != null : "Retry block is null";
+      // This is a retry. Just return the last block.
+      return onRetryBlock[0];
     }
 
-    // choose targets for the new block to be allocated.
-    return getBlockManager().chooseTarget4NewBlock( 
-        src, replication, clientNode, excludedNodes, blockSize, favoredNodes,
-        storagePolicyID);
-  }
+    DatanodeStorageInfo[] targets = FSDirWriteFileOp.chooseTargetForNewBlock(
+        blockManager, src, excludedNodes, favoredNodes, r);
 
-  /**
-   * Part II of getAdditionalBlock().
-   * Should repeat the same analysis of the file state as in Part 1,
-   * but under the write lock.
-   * If the conditions still hold, then allocate a new block with
-   * the new targets, add it to the INode and to the BlocksMap.
-   */
-  LocatedBlock storeAllocatedBlock(String src, long fileId, String clientName,
-      ExtendedBlock previous, DatanodeStorageInfo[] targets) throws IOException {
-    Block newBlock = null;
-    long offset;
     checkOperation(OperationCategory.WRITE);
-    waitForLoadingFSImage();
     writeLock();
+    LocatedBlock lb;
     try {
       checkOperation(OperationCategory.WRITE);
-      // Run the full analysis again, since things could have changed
-      // while chooseTarget() was executing.
-      LocatedBlock[] onRetryBlock = new LocatedBlock[1];
-      FileState fileState = 
-          analyzeFileState(src, fileId, clientName, previous, onRetryBlock);
-      final INodeFile pendingFile = fileState.inode;
-      src = fileState.path;
-
-      if (onRetryBlock[0] != null) {
-        if (onRetryBlock[0].getLocations().length > 0) {
-          // This is a retry. Just return the last block if having locations.
-          return onRetryBlock[0];
-        } else {
-          // add new chosen targets to already allocated block and return
-          BlockInfoContiguous lastBlockInFile = pendingFile.getLastBlock();
-          ((BlockInfoContiguousUnderConstruction) lastBlockInFile)
-              .setExpectedLocations(targets);
-          offset = pendingFile.computeFileSize();
-          return makeLocatedBlock(lastBlockInFile, targets, offset);
-        }
-      }
-
-      // commit the last block and complete it if it has minimum replicas
-      commitOrCompleteLastBlock(pendingFile, fileState.iip,
-                                ExtendedBlock.getLocalBlock(previous));
-
-      // allocate new block, record block locations in INode.
-      newBlock = createNewBlock();
-      INodesInPath inodesInPath = INodesInPath.fromINode(pendingFile);
-      saveAllocatedBlock(src, inodesInPath, newBlock, targets);
-
-      persistNewBlock(src, pendingFile);
-      offset = pendingFile.computeFileSize();
+      lb = FSDirWriteFileOp.storeAllocatedBlock(
+          this, src, fileId, clientName, previous, targets);
     } finally {
       writeUnlock();
     }
     getEditLog().logSync();
-
-    // Return located block
-    return makeLocatedBlock(newBlock, targets, offset);
-  }
-
-  /*
-   * Resolve clientmachine address to get a network location path
-   */
-  private Node getClientNode(String clientMachine) {
-    List<String> hosts = new ArrayList<String>(1);
-    hosts.add(clientMachine);
-    List<String> rName = getBlockManager().getDatanodeManager()
-        .resolveNetworkLocation(hosts);
-    Node clientNode = null;
-    if (rName != null) {
-      // Able to resolve clientMachine mapping.
-      // Create a temp node to findout the rack local nodes
-      clientNode = new NodeBase(rName.get(0) + NodeBase.PATH_SEPARATOR_STR
-          + clientMachine);
-    }
-    return clientNode;
-  }
-
-  static class FileState {
-    public final INodeFile inode;
-    public final String path;
-    public final INodesInPath iip;
-
-    public FileState(INodeFile inode, String fullPath, INodesInPath iip) {
-      this.inode = inode;
-      this.path = fullPath;
-      this.iip = iip;
-    }
-  }
-
-  FileState analyzeFileState(String src,
-                                long fileId,
-                                String clientName,
-                                ExtendedBlock previous,
-                                LocatedBlock[] onRetryBlock)
-          throws IOException  {
-    assert hasReadLock();
-
-    checkBlock(previous);
-    onRetryBlock[0] = null;
-    checkNameNodeSafeMode("Cannot add block to " + src);
-
-    // have we exceeded the configured limit of fs objects.
-    checkFsObjectLimit();
-
-    Block previousBlock = ExtendedBlock.getLocalBlock(previous);
-    final INode inode;
-    final INodesInPath iip;
-    if (fileId == HdfsConstants.GRANDFATHER_INODE_ID) {
-      // Older clients may not have given us an inode ID to work with.
-      // In this case, we have to try to resolve the path and hope it
-      // hasn't changed or been deleted since the file was opened for write.
-      iip = dir.getINodesInPath4Write(src);
-      inode = iip.getLastINode();
-    } else {
-      // Newer clients pass the inode ID, so we can just get the inode
-      // directly.
-      inode = dir.getInode(fileId);
-      iip = INodesInPath.fromINode(inode);
-      if (inode != null) {
-        src = iip.getPath();
-      }
-    }
-    final INodeFile pendingFile = checkLease(src, clientName, inode, fileId);
-    BlockInfoContiguous lastBlockInFile = pendingFile.getLastBlock();
-    if (!Block.matchingIdAndGenStamp(previousBlock, lastBlockInFile)) {
-      // The block that the client claims is the current last block
-      // doesn't match up with what we think is the last block. There are
-      // four possibilities:
-      // 1) This is the first block allocation of an append() pipeline
-      //    which started appending exactly at or exceeding the block boundary.
-      //    In this case, the client isn't passed the previous block,
-      //    so it makes the allocateBlock() call with previous=null.
-      //    We can distinguish this since the last block of the file
-      //    will be exactly a full block.
-      // 2) This is a retry from a client that missed the response of a
-      //    prior getAdditionalBlock() call, perhaps because of a network
-      //    timeout, or because of an HA failover. In that case, we know
-      //    by the fact that the client is re-issuing the RPC that it
-      //    never began to write to the old block. Hence it is safe to
-      //    to return the existing block.
-      // 3) This is an entirely bogus request/bug -- we should error out
-      //    rather than potentially appending a new block with an empty
-      //    one in the middle, etc
-      // 4) This is a retry from a client that timed out while
-      //    the prior getAdditionalBlock() is still being processed,
-      //    currently working on chooseTarget(). 
-      //    There are no means to distinguish between the first and 
-      //    the second attempts in Part I, because the first one hasn't
-      //    changed the namesystem state yet.
-      //    We run this analysis again in Part II where case 4 is impossible.
-
-      BlockInfoContiguous penultimateBlock = pendingFile.getPenultimateBlock();
-      if (previous == null &&
-          lastBlockInFile != null &&
-          lastBlockInFile.getNumBytes() >= pendingFile.getPreferredBlockSize() &&
-          lastBlockInFile.isComplete()) {
-        // Case 1
-        if (NameNode.stateChangeLog.isDebugEnabled()) {
-           NameNode.stateChangeLog.debug(
-               "BLOCK* NameSystem.allocateBlock: handling block allocation" +
-               " writing to a file with a complete previous block: src=" +
-               src + " lastBlock=" + lastBlockInFile);
-        }
-      } else if (Block.matchingIdAndGenStamp(penultimateBlock, previousBlock)) {
-        if (lastBlockInFile.getNumBytes() != 0) {
-          throw new IOException(
-              "Request looked like a retry to allocate block " +
-              lastBlockInFile + " but it already contains " +
-              lastBlockInFile.getNumBytes() + " bytes");
-        }
-
-        // Case 2
-        // Return the last block.
-        NameNode.stateChangeLog.info("BLOCK* allocateBlock: " +
-            "caught retry for allocation of a new block in " +
-            src + ". Returning previously allocated block " + lastBlockInFile);
-        long offset = pendingFile.computeFileSize();
-        onRetryBlock[0] = makeLocatedBlock(lastBlockInFile,
-            ((BlockInfoContiguousUnderConstruction)lastBlockInFile).getExpectedStorageLocations(),
-            offset);
-        return new FileState(pendingFile, src, iip);
-      } else {
-        // Case 3
-        throw new IOException("Cannot allocate block in " + src + ": " +
-            "passed 'previous' block " + previous + " does not match actual " +
-            "last block in file " + lastBlockInFile);
-      }
-    }
-    return new FileState(pendingFile, src, iip);
-  }
-
-  LocatedBlock makeLocatedBlock(Block blk, DatanodeStorageInfo[] locs,
-                                        long offset) throws IOException {
-    LocatedBlock lBlk = BlockManager.newLocatedBlock(
-        getExtendedBlock(blk), locs, offset, false);
-    getBlockManager().setBlockToken(
-        lBlk, BlockTokenIdentifier.AccessMode.WRITE);
-    return lBlk;
+    return lb;
   }
 
   /** @see ClientProtocol#getAdditionalDatanode */
@@ -3378,7 +3136,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     }
 
     if (clientnode == null) {
-      clientnode = getClientNode(clientMachine);
+      clientnode = FSDirWriteFileOp.getClientNode(blockManager, clientMachine);
     }
 
     // choose new datanodes.
@@ -3394,60 +3152,32 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   /**
    * The client would like to let go of the given block
    */
-  boolean abandonBlock(ExtendedBlock b, long fileId, String src, String holder)
+  void abandonBlock(ExtendedBlock b, long fileId, String src, String holder)
       throws IOException {
     if(NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("BLOCK* NameSystem.abandonBlock: " + b
           + "of file " + src);
     }
+    waitForLoadingFSImage();
     checkOperation(OperationCategory.WRITE);
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     FSPermissionChecker pc = getPermissionChecker();
-    waitForLoadingFSImage();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot abandon block " + b + " for file" + src);
-      src = dir.resolvePath(pc, src, pathComponents);
-
-      final INode inode;
-      final INodesInPath iip;
-      if (fileId == HdfsConstants.GRANDFATHER_INODE_ID) {
-        // Older clients may not have given us an inode ID to work with.
-        // In this case, we have to try to resolve the path and hope it
-        // hasn't changed or been deleted since the file was opened for write.
-        iip = dir.getINodesInPath(src, true);
-        inode = iip.getLastINode();
-      } else {
-        inode = dir.getInode(fileId);
-        iip = INodesInPath.fromINode(inode);
-        if (inode != null) {
-          src = iip.getPath();
-        }
-      }
-      final INodeFile file = checkLease(src, holder, inode, fileId);
-
-      // Remove the block from the pending creates list
-      boolean removed = dir.removeBlock(src, iip, file,
-          ExtendedBlock.getLocalBlock(b));
-      if (!removed) {
-        return true;
-      }
+      FSDirWriteFileOp.abandonBlock(dir, pc, b, fileId, src, holder);
       if(NameNode.stateChangeLog.isDebugEnabled()) {
         NameNode.stateChangeLog.debug("BLOCK* NameSystem.abandonBlock: "
                                       + b + " is removed from pendingCreates");
       }
-      persistBlocks(src, file, false);
     } finally {
       writeUnlock();
     }
     getEditLog().logSync();
-
-    return true;
   }
 
-  private INodeFile checkLease(String src, String holder, INode inode,
-      long fileId) throws LeaseExpiredException, FileNotFoundException {
+  INodeFile checkLease(
+      String src, String holder, INode inode, long fileId) throws LeaseExpiredException, FileNotFoundException {
     assert hasReadLock();
     final String ident = src + " (inode " + fileId + ")";
     if (inode == null) {
@@ -3492,120 +3222,30 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    *         (e.g if not all blocks have reached minimum replication yet)
    * @throws IOException on error (eg lease mismatch, file not open, file deleted)
    */
-  boolean completeFile(final String srcArg, String holder,
+  boolean completeFile(final String src, String holder,
                        ExtendedBlock last, long fileId)
-    throws SafeModeException, UnresolvedLinkException, IOException {
-    String src = srcArg;
-    if (NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* NameSystem.completeFile: " +
-          src + " for " + holder);
-    }
-    checkBlock(last);
+    throws IOException {
     boolean success = false;
     checkOperation(OperationCategory.WRITE);
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    FSPermissionChecker pc = getPermissionChecker();
     waitForLoadingFSImage();
+    FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot complete file " + src);
-      src = dir.resolvePath(pc, src, pathComponents);
-      success = completeFileInternal(src, holder,
-        ExtendedBlock.getLocalBlock(last), fileId);
+      success = FSDirWriteFileOp.completeFile(this, pc, src, holder, last,
+                                              fileId);
     } finally {
       writeUnlock();
     }
     getEditLog().logSync();
-    if (success) {
-      NameNode.stateChangeLog.info("DIR* completeFile: " + srcArg
-          + " is closed by " + holder);
-    }
     return success;
   }
 
-  private boolean completeFileInternal(String src, String holder, Block last,
-      long fileId) throws IOException {
-    assert hasWriteLock();
-    final INodeFile pendingFile;
-    final INodesInPath iip;
-    INode inode = null;
-    try {
-      if (fileId == HdfsConstants.GRANDFATHER_INODE_ID) {
-        // Older clients may not have given us an inode ID to work with.
-        // In this case, we have to try to resolve the path and hope it
-        // hasn't changed or been deleted since the file was opened for write.
-        iip = dir.getINodesInPath(src, true);
-        inode = iip.getLastINode();
-      } else {
-        inode = dir.getInode(fileId);
-        iip = INodesInPath.fromINode(inode);
-        if (inode != null) {
-          src = iip.getPath();
-        }
-      }
-      pendingFile = checkLease(src, holder, inode, fileId);
-    } catch (LeaseExpiredException lee) {
-      if (inode != null && inode.isFile() &&
-          !inode.asFile().isUnderConstruction()) {
-        // This could be a retry RPC - i.e the client tried to close
-        // the file, but missed the RPC response. Thus, it is trying
-        // again to close the file. If the file still exists and
-        // the client's view of the last block matches the actual
-        // last block, then we'll treat it as a successful close.
-        // See HDFS-3031.
-        final Block realLastBlock = inode.asFile().getLastBlock();
-        if (Block.matchingIdAndGenStamp(last, realLastBlock)) {
-          NameNode.stateChangeLog.info("DIR* completeFile: " +
-              "request from " + holder + " to complete inode " + fileId +
-              "(" + src + ") which is already closed. But, it appears to be " +
-              "an RPC retry. Returning success");
-          return true;
-        }
-      }
-      throw lee;
-    }
-    // Check the state of the penultimate block. It should be completed
-    // before attempting to complete the last one.
-    if (!checkFileProgress(src, pendingFile, false)) {
-      return false;
-    }
-
-    // commit the last block and complete it if it has minimum replicas
-    commitOrCompleteLastBlock(pendingFile, iip, last);
-
-    if (!checkFileProgress(src, pendingFile, true)) {
-      return false;
-    }
-
-    finalizeINodeFileUnderConstruction(src, pendingFile,
-        Snapshot.CURRENT_STATE_ID);
-    return true;
-  }
-
-  /**
-   * Save allocated block at the given pending filename
-   * 
-   * @param src path to the file
-   * @param inodesInPath representing each of the components of src.
-   *                     The last INode is the INode for {@code src} file.
-   * @param newBlock newly allocated block to be save
-   * @param targets target datanodes where replicas of the new block is placed
-   * @throws QuotaExceededException If addition of block exceeds space quota
-   */
-  private void saveAllocatedBlock(String src, INodesInPath inodesInPath,
-      Block newBlock, DatanodeStorageInfo[] targets)
-      throws IOException {
-    assert hasWriteLock();
-    BlockInfoContiguous b = dir.addBlock(src, inodesInPath, newBlock, targets);
-    NameNode.stateChangeLog.info("BLOCK* allocate " + b + " for " + src);
-    DatanodeStorageInfo.incrementBlocksScheduled(targets);
-  }
-
   /**
    * Create new block with a unique block id and a new generation stamp.
    */
-  private Block createNewBlock() throws IOException {
+  Block createNewBlock() throws IOException {
     assert hasWriteLock();
     Block b = new Block(nextBlockId(), 0, 0);
     // Increment the generation stamp for every new block.
@@ -3997,7 +3637,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         pendingFile.getFileUnderConstructionFeature().updateLengthOfLastBlock(
             pendingFile, lastBlockLength);
       }
-      persistBlocks(src, pendingFile, false);
+      FSDirWriteFileOp.persistBlocks(dir, src, pendingFile, false);
     } finally {
       writeUnlock();
     }
@@ -4167,8 +3807,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     return leaseManager.reassignLease(lease, pendingFile, newHolder);
   }
 
-  private void commitOrCompleteLastBlock(final INodeFile fileINode,
-      final INodesInPath iip, final Block commitBlock) throws IOException {
+  void commitOrCompleteLastBlock(
+      final INodeFile fileINode, final INodesInPath iip,
+      final Block commitBlock) throws IOException {
     assert hasWriteLock();
     Preconditions.checkArgument(fileINode.isUnderConstruction());
     if (!blockManager.commitOrCompleteLastBlock(fileINode, commitBlock)) {
@@ -4186,14 +3827,14 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     }
   }
 
-  private void finalizeINodeFileUnderConstruction(String src,
-      INodeFile pendingFile, int latestSnapshot) throws IOException {
+  void finalizeINodeFileUnderConstruction(
+      String src, INodeFile pendingFile, int latestSnapshot) throws IOException {
     assert hasWriteLock();
 
     FileUnderConstructionFeature uc = pendingFile.getFileUnderConstructionFeature();
     Preconditions.checkArgument(uc != null);
     leaseManager.removeLease(uc.getClientName(), pendingFile);
-    
+
     pendingFile.recordModification(latestSnapshot);
 
     // The file is no longer pending.
@@ -4405,7 +4046,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       } else {
         // If this commit does not want to close the file, persist blocks
         src = iFile.getFullPathName();
-        persistBlocks(src, iFile, false);
+        FSDirWriteFileOp.persistBlocks(dir, src, iFile, false);
       }
     } finally {
       writeUnlock();
@@ -4596,24 +4237,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   }
 
   /**
-   * Persist the block list for the inode.
-   * @param path
-   * @param file
-   * @param logRetryCache
-   */
-  private void persistBlocks(String path, INodeFile file,
-                             boolean logRetryCache) {
-    assert hasWriteLock();
-    Preconditions.checkArgument(file.isUnderConstruction());
-    getEditLog().logUpdateBlocks(path, file, logRetryCache);
-    if(NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("persistBlocks: " + path
-              + " with " + file.getBlocks().length + " blocks is persisted to" +
-              " the file system");
-    }
-  }
-
-  /**
    * Close file.
    * @param path
    * @param file
@@ -4800,13 +4423,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 
   public FSEditLog getEditLog() {
     return getFSImage().getEditLog();
-  }    
-
-  private void checkBlock(ExtendedBlock block) throws IOException {
-    if (block != null && !this.blockPoolId.equals(block.getBlockPoolId())) {
-      throw new IOException("Unexpected BlockPoolId " + block.getBlockPoolId()
-          + " - expected " + blockPoolId);
-    }
   }
 
   @Metric({"MissingBlocks", "Number of missing blocks"})
@@ -5080,21 +4696,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   }
 
   /**
-   * Persist the new block (the last block of the given file).
-   * @param path
-   * @param file
-   */
-  private void persistNewBlock(String path, INodeFile file) {
-    Preconditions.checkArgument(file.isUnderConstruction());
-    getEditLog().logAddBlock(path, file);
-    if (NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("persistNewBlock: "
-              + path + " with new block " + file.getLastBlock().toString()
-              + ", current total block count is " + file.getBlocks().length);
-    }
-  }
-
-  /**
    * SafeModeInfo contains information related to the safe mode.
    * <p>
    * An instance of {@link SafeModeInfo} is created when the name node
@@ -6399,7 +6000,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     blockinfo.setExpectedLocations(storages);
 
     String src = pendingFile.getFullPathName();
-    persistBlocks(src, pendingFile, logRetryCache);
+    FSDirWriteFileOp.persistBlocks(dir, src, pendingFile, logRetryCache);
   }
 
   /**
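
The body of the relocated persistBlocks helper is not part of this message's diff, but its shape can be inferred from the FSNamesystem version deleted above and the new call sites of the form FSDirWriteFileOp.persistBlocks(dir, src, pendingFile, logRetryCache). The following is only an inferred sketch, not the committed code; the fsd.getEditLog() and fsd.getFSNamesystem() accessors are assumptions rather than something shown in this patch.

  // Inferred sketch of the relocated helper inside FSDirWriteFileOp -- the
  // actual new source file is not included in this mail. Signature taken from
  // the call sites above; accessor methods on FSDirectory are assumed.
  static void persistBlocks(FSDirectory fsd, String path, INodeFile file,
      boolean logRetryCache) {
    assert fsd.getFSNamesystem().hasWriteLock();
    Preconditions.checkArgument(file.isUnderConstruction());
    fsd.getEditLog().logUpdateBlocks(path, file, logRetryCache);
    if (NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("persistBlocks: " + path + " with "
          + file.getBlocks().length + " blocks is persisted to the file system");
    }
  }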

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5afac58/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 3311609..0d416a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -713,23 +713,11 @@ class NameNodeRpcServer implements NamenodeProtocols {
       String[] favoredNodes)
       throws IOException {
     checkNNStartup();
-    if (stateChangeLog.isDebugEnabled()) {
-      stateChangeLog.debug("*BLOCK* NameNode.addBlock: file " + src
-          + " fileId=" + fileId + " for " + clientName);
-    }
-    Set<Node> excludedNodesSet = null;
-    if (excludedNodes != null) {
-      excludedNodesSet = new HashSet<Node>(excludedNodes.length);
-      for (Node node : excludedNodes) {
-        excludedNodesSet.add(node);
-      }
-    }
-    List<String> favoredNodesList = (favoredNodes == null) ? null
-        : Arrays.asList(favoredNodes);
     LocatedBlock locatedBlock = namesystem.getAdditionalBlock(src, fileId,
-        clientName, previous, excludedNodesSet, favoredNodesList);
-    if (locatedBlock != null)
+        clientName, previous, excludedNodes, favoredNodes);
+    if (locatedBlock != null) {
       metrics.incrAddBlockOps();
+    }
     return locatedBlock;
   }
 
@@ -770,13 +758,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
   public void abandonBlock(ExtendedBlock b, long fileId, String src,
         String holder) throws IOException {
     checkNNStartup();
-    if(stateChangeLog.isDebugEnabled()) {
-      stateChangeLog.debug("*BLOCK* NameNode.abandonBlock: "
-          +b+" of file "+src);
-    }
-    if (!namesystem.abandonBlock(b, fileId, src, holder)) {
-      throw new IOException("Cannot abandon block during write to " + src);
-    }
+    namesystem.abandonBlock(b, fileId, src, holder);
   }
 
   @Override // ClientProtocol
@@ -784,10 +766,6 @@ class NameNodeRpcServer implements NamenodeProtocols {
                           ExtendedBlock last,  long fileId)
       throws IOException {
     checkNNStartup();
-    if(stateChangeLog.isDebugEnabled()) {
-      stateChangeLog.debug("*DIR* NameNode.complete: "
-          + src + " fileId=" + fileId +" for " + clientName);
-    }
     return namesystem.completeFile(src, clientName, last, fileId);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5afac58/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
index 5a4134c..c92e79b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.io.EnumSetWritable;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 /**
  * Race between two threads simultaneously calling
@@ -88,25 +89,40 @@ public class TestAddBlockRetry {
     // start first addBlock()
     LOG.info("Starting first addBlock for " + src);
     LocatedBlock[] onRetryBlock = new LocatedBlock[1];
-    DatanodeStorageInfo targets[] = ns.getNewBlockTargets(
-        src, HdfsConstants.GRANDFATHER_INODE_ID, "clientName",
-        null, null, null, onRetryBlock);
+    ns.readLock();
+    FSDirWriteFileOp.ValidateAddBlockResult r;
+    FSPermissionChecker pc = Mockito.mock(FSPermissionChecker.class);
+    try {
+      r = FSDirWriteFileOp.validateAddBlock(ns, pc, src,
+                                            HdfsConstants.GRANDFATHER_INODE_ID,
+                                            "clientName", null, onRetryBlock);
+    } finally {
+      ns.readUnlock();
+    }
+    DatanodeStorageInfo targets[] = FSDirWriteFileOp.chooseTargetForNewBlock(
+        ns.getBlockManager(), src, null, null, r);
     assertNotNull("Targets must be generated", targets);
 
     // run second addBlock()
     LOG.info("Starting second addBlock for " + src);
     nn.addBlock(src, "clientName", null, null,
-        HdfsConstants.GRANDFATHER_INODE_ID, null);
+                HdfsConstants.GRANDFATHER_INODE_ID, null);
     assertTrue("Penultimate block must be complete",
-        checkFileProgress(src, false));
+               checkFileProgress(src, false));
     LocatedBlocks lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
     assertEquals("Must be one block", 1, lbs.getLocatedBlocks().size());
     LocatedBlock lb2 = lbs.get(0);
     assertEquals("Wrong replication", REPLICATION, lb2.getLocations().length);
 
     // continue first addBlock()
-    LocatedBlock newBlock = ns.storeAllocatedBlock(
-        src, HdfsConstants.GRANDFATHER_INODE_ID, "clientName", null, targets);
+    ns.writeLock();
+    LocatedBlock newBlock;
+    try {
+      newBlock = FSDirWriteFileOp.storeAllocatedBlock(ns, src,
+          HdfsConstants.GRANDFATHER_INODE_ID, "clientName", null, targets);
+    } finally {
+      ns.writeUnlock();
+    }
     assertEquals("Blocks are not equal", lb2.getBlock(), newBlock.getBlock());
 
     // check locations
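
Distilled from the test changes above, here is a minimal sketch of the three-phase allocation flow the refactor introduces: request validation under the namesystem read lock, target selection with no lock held, and block allocation under the write lock. It is not production code from the patch; the AddBlockFlowSketch class is hypothetical, retry handling via onRetryBlock is omitted, and the exclude/favored-node arguments are passed as null exactly as the test does.

package org.apache.hadoop.hdfs.server.namenode;

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;

// Hypothetical helper; it lives in this package because the FSDirWriteFileOp
// methods it calls are package-private.
class AddBlockFlowSketch {
  static LocatedBlock allocateBlock(FSNamesystem fsn, FSPermissionChecker pc,
      String src, long fileId, String clientName) throws IOException {
    LocatedBlock[] onRetryBlock = new LocatedBlock[1];

    // Phase 1: validate the request while holding only the read lock.
    FSDirWriteFileOp.ValidateAddBlockResult r;
    fsn.readLock();
    try {
      r = FSDirWriteFileOp.validateAddBlock(fsn, pc, src, fileId, clientName,
          null /* previous block */, onRetryBlock);
    } finally {
      fsn.readUnlock();
    }
    // (Retry handling via onRetryBlock[0] is omitted in this sketch.)

    // Phase 2: choose target datanodes with no namesystem lock held.
    DatanodeStorageInfo[] targets = FSDirWriteFileOp.chooseTargetForNewBlock(
        fsn.getBlockManager(), src, null /* excluded */, null /* favored */, r);

    // Phase 3: re-acquire the write lock to allocate and persist the block.
    fsn.writeLock();
    try {
      return FSDirWriteFileOp.storeAllocatedBlock(fsn, src, fileId, clientName,
          null /* previous block */, targets);
    } finally {
      fsn.writeUnlock();
    }
  }
}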

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5afac58/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
index 3049612..ea560fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderCon
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.junit.Test;
+import org.mockito.internal.util.reflection.Whitebox;
 
 import java.io.IOException;
 
@@ -45,7 +46,9 @@ public class TestCommitBlockSynchronization {
   private FSNamesystem makeNameSystemSpy(Block block, INodeFile file)
       throws IOException {
     Configuration conf = new Configuration();
+    FSEditLog editlog = mock(FSEditLog.class);
     FSImage image = new FSImage(conf);
+    Whitebox.setInternalState(image, "editLog", editlog);
     final DatanodeStorageInfo[] targets = {};
 
     FSNamesystem namesystem = new FSNamesystem(conf, image);
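
The spy setup above relies on the internal Whitebox utility shipped with Mockito 1.x to push a mocked FSEditLog into FSImage's private field. Below is a minimal, self-contained sketch of that injection pattern; the Image and EditLog classes are hypothetical stand-ins, not the real HDFS types.

import static org.mockito.Mockito.mock;

import org.mockito.internal.util.reflection.Whitebox;

public class WhiteboxInjectionSketch {
  // Hypothetical stand-ins for a class holding a private collaborator with no
  // setter, analogous to FSImage holding its FSEditLog.
  static class EditLog {
    long lastTxId() { return 42L; }
  }
  static class Image {
    private EditLog editLog = new EditLog();
    long lastTxId() { return editLog.lastTxId(); }
  }

  public static void main(String[] args) {
    Image image = new Image();

    // Swap the private field for a Mockito mock without touching constructors.
    EditLog mocked = mock(EditLog.class);            // lastTxId() stubs to 0L
    Whitebox.setInternalState(image, "editLog", mocked);

    System.out.println(image.lastTxId());            // prints 0, not 42
  }
}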


[16/50] [abbrv] hadoop git commit: HADOOP-10582. Fix the test case for copying to non-existent dir in TestFsShellCopy. Contributed by Kousuke Saruta.

Posted by ji...@apache.org.
HADOOP-10582. Fix the test case for copying to non-existent dir in TestFsShellCopy. Contributed by Kousuke Saruta.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a46506d9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a46506d9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a46506d9

Branch: refs/heads/HDFS-7240
Commit: a46506d99cb1310c0e446d590f36fb9afae0fa60
Parents: f5c4823
Author: Akira Ajisaka <aa...@apache.org>
Authored: Mon May 18 16:31:41 2015 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Mon May 18 16:31:41 2015 +0900

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                | 3 +++
 .../src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java    | 6 +++---
 2 files changed, 6 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a46506d9/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 7349091..2138334 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -695,6 +695,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-11988. Fix typo in the document for hadoop fs -find.
     (Kengo Seki via aajisaka)
 
+    HADOOP-10582. Fix the test case for copying to non-existent dir in
+    TestFsShellCopy. (Kousuke Saruta via aajisaka)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a46506d9/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java
index bef0c9f..c0a6b20 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java
@@ -177,9 +177,9 @@ public class TestFsShellCopy {
       checkPut(0, srcPath, dstPath, useWindowsPath);
     }
 
-    // copy to non-existent subdir
-    prepPut(childPath, false, false);
-    checkPut(1, srcPath, dstPath, useWindowsPath);
+    // copy to non-existent dir
+    prepPut(dstPath, false, false);
+    checkPut(1, srcPath, childPath, useWindowsPath);
 
     // copy into dir, then with another name
     prepPut(dstPath, true, true);
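
For context, the corrected assertion boils down to: copying a file to a destination whose parent directory does not exist must fail with a non-zero exit code. A standalone sketch of that behaviour, driving FsShell programmatically against the local filesystem; the paths /tmp/src.txt and /tmp/no-such-dir are purely illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class PutToMissingDirSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes file:///tmp/src.txt exists and file:///tmp/no-such-dir does not.
    int exit = ToolRunner.run(conf, new FsShell(), new String[] {
        "-put", "file:///tmp/src.txt", "file:///tmp/no-such-dir/child"});
    // The shell reports a "No such file or directory" error for the target.
    System.out.println("exit code = " + exit);       // expected: 1 (failure)
  }
}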


[48/50] [abbrv] hadoop git commit: Move YARN-2918 from 2.8.0 to 2.7.1

Posted by ji...@apache.org.
Move YARN-2918 from 2.8.0 to 2.7.1


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/03f897fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/03f897fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/03f897fd

Branch: refs/heads/HDFS-7240
Commit: 03f897fd1a3779251023bae358207069b89addbf
Parents: 4aa730c
Author: Wangda Tan <wa...@apache.org>
Authored: Wed May 20 13:49:10 2015 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Wed May 20 13:49:10 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/03f897fd/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 4bd4132..d1da808 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -385,9 +385,6 @@ Release 2.8.0 - UNRELEASED
     YARN-3584. Fixed attempt diagnostics format shown on the UI. (nijel via
     jianhe)
 
-    YARN-2918. RM should not fail on startup if queue's configured labels do
-    not exist in cluster-node-labels. (Wangda Tan via jianhe)
-
     YARN-1832. Fix wrong MockLocalizerStatus#equals implementation.
     (Hong Zhiguo via aajisaka)
 
@@ -526,6 +523,9 @@ Release 2.7.1 - UNRELEASED
     YARN-3677. Fix findbugs warnings in yarn-server-resourcemanager.
     (Vinod Kumar Vavilapalli via ozawa)
 
+    YARN-2918. RM should not fail on startup if queue's configured labels do
+    not exist in cluster-node-labels. (Wangda Tan via jianhe)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES


[21/50] [abbrv] hadoop git commit: HADOOP-11949. Add user-provided plugins to test-patch (Sean Busbey via aw)

Posted by ji...@apache.org.
HADOOP-11949. Add user-provided plugins to test-patch (Sean Busbey via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/060c84ea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/060c84ea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/060c84ea

Branch: refs/heads/HDFS-7240
Commit: 060c84ea86257e3dea2f834aac7ae27b1456c434
Parents: bcc1786
Author: Allen Wittenauer <aw...@apache.org>
Authored: Mon May 18 17:06:31 2015 +0000
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Mon May 18 17:06:31 2015 +0000

----------------------------------------------------------------------
 dev-support/test-patch.sh                       | 27 +++++++++++++++++---
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 ++
 2 files changed, 25 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/060c84ea/dev-support/test-patch.sh
----------------------------------------------------------------------
diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index ae74c5b..57fd657 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -40,6 +40,9 @@ function setup_defaults
   BASEDIR=$(pwd)
   RELOCATE_PATCH_DIR=false
 
+  USER_PLUGIN_DIR=""
+  LOAD_SYSTEM_PLUGINS=true
+
   FINDBUGS_HOME=${FINDBUGS_HOME:-}
   ECLIPSE_HOME=${ECLIPSE_HOME:-}
   BUILD_NATIVE=${BUILD_NATIVE:-true}
@@ -586,9 +589,11 @@ function hadoop_usage
   echo "--modulelist=<list>    Specify additional modules to test (comma delimited)"
   echo "--offline              Avoid connecting to the Internet"
   echo "--patch-dir=<dir>      The directory for working and output files (default '/tmp/${PROJECT_NAME}-test-patch/pid')"
+  echo "--plugins=<dir>        A directory of user provided plugins. see test-patch.d for examples (default empty)"
   echo "--project=<name>       The short name for project currently using test-patch (default 'hadoop')"
   echo "--resetrepo            Forcibly clean the repo"
   echo "--run-tests            Run all relevant tests below the base directory"
+  echo "--skip-system-plugins  Do not load plugins from ${BINDIR}/test-patch.d"
   echo "--testlist=<list>      Specify which subsystem tests to use (comma delimited)"
 
   echo "Shell binary overrides:"
@@ -706,6 +711,9 @@ function parse_args
       --patch-dir=*)
         USER_PATCH_DIR=${i#*=}
       ;;
+      --plugins=*)
+        USER_PLUGIN_DIR=${i#*=}
+      ;;
       --project=*)
         PROJECT_NAME=${i#*=}
       ;;
@@ -723,6 +731,9 @@ function parse_args
       --run-tests)
         RUN_TESTS=true
       ;;
+      --skip-system-plugins)
+        LOAD_SYSTEM_PLUGINS=false
+      ;;
       --testlist=*)
         testlist=${i#*=}
         testlist=${testlist//,/ }
@@ -2523,17 +2534,25 @@ function runtests
   done
 }
 
-## @description  Import content from test-patch.d
+## @description  Import content from test-patch.d and optionally
+## @description  from user provided plugin directory
 ## @audience     private
 ## @stability    evolving
 ## @replaceable  no
 function importplugins
 {
   local i
-  local files
+  local files=()
+
+  if [[ ${LOAD_SYSTEM_PLUGINS} == "true" ]]; then
+    if [[ -d "${BINDIR}/test-patch.d" ]]; then
+      files=(${BINDIR}/test-patch.d/*.sh)
+    fi
+  fi
 
-  if [[ -d "${BINDIR}/test-patch.d" ]]; then
-    files=(${BINDIR}/test-patch.d/*.sh)
+  if [[ -n "${USER_PLUGIN_DIR}" && -d "${USER_PLUGIN_DIR}" ]]; then
+    hadoop_debug "Loading user provided plugins from ${USER_PLUGIN_DIR}"
+    files=("${files[@]}" ${USER_PLUGIN_DIR}/*.sh)
   fi
 
   for i in "${files[@]}"; do

http://git-wip-us.apache.org/repos/asf/hadoop/blob/060c84ea/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 8f66072..324434b 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -486,6 +486,8 @@ Release 2.8.0 - UNRELEASED
     HADOOP-11843. Make setting up the build environment easier.
     (Niels Basjes via cnauroth)
 
+    HADOOP-11949. Add user-provided plugins to test-patch (Sean Busbey via aw)
+
   IMPROVEMENTS
 
     HADOOP-6842. "hadoop fs -text" does not give a useful text representation


[02/50] [abbrv] hadoop git commit: HDFS-8350. Remove old webhdfs.xml and other outdated documentation stuff. Contributed by Brahma Reddy Battula.

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee7beda6/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/webhdfs.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/webhdfs.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/webhdfs.xml
deleted file mode 100644
index c8e0c62..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/webhdfs.xml
+++ /dev/null
@@ -1,1577 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<!DOCTYPE document PUBLIC "-//APACHE//DTD Documentation V2.0//EN" "http://forrest.apache.org/dtd/document-v20.dtd">
-
-<document>
-  <header>
-    <title>WebHDFS REST API</title>
-  </header>
-
-  <body>
-    <section>
-      <title>Document Conventions</title>
-<table>
-<tr><td><code>Monospaced</code></td><td>Used for commands, HTTP request and responses and code blocks.</td></tr>
-<tr><td><code>&lt;Monospaced&gt;</code></td><td>User entered values.</td></tr>
-<tr><td><code>[Monospaced]</code></td><td>Optional values.  When the value is not specified, the default value is used.</td></tr>
-<tr><td><em>Italics</em></td><td>Important phrases and words.</td></tr>
-</table>
-    </section>
-<!-- ***************************************************************************** -->
-    <section>
-      <title>Introduction</title>
-<p>
-  The HTTP REST API supports the complete FileSystem interface for HDFS.
-  The operations and the corresponding FileSystem methods are shown in the next section.
-  The Section <a href="#ParameterDictionary">HTTP Query Parameter Dictionary</a> specifies the parameter details
-  such as the defaults and the valid values.
-</p>
-      <section id="Operations">
-        <title>Operations</title>
-<ul>
-  <li>HTTP GET
-  <ul>
-    <li><a href="#OPEN"><code>OPEN</code></a>
-        (see <a href="ext:api/org/apache/hadoop/fs/filesystem/open">FileSystem.open</a>)
-    </li>
-    <li><a href="#GETFILESTATUS"><code>GETFILESTATUS</code></a>
-        (see <a href="ext:api/org/apache/hadoop/fs/filesystem/getFileStatus">FileSystem.getFileStatus</a>)
-    </li>
-    <li><a href="#LISTSTATUS"><code>LISTSTATUS</code></a>
-        (see <a href="ext:api/org/apache/hadoop/fs/filesystem/listStatus">FileSystem.listStatus</a>)
-    </li>
-    <li><a href="#GETCONTENTSUMMARY"><code>GETCONTENTSUMMARY</code></a>
-        (see <a href="ext:api/org/apache/hadoop/fs/filesystem/getContentSummary">FileSystem.getContentSummary</a>)
-    </li>
-    <li><a href="#GETFILECHECKSUM"><code>GETFILECHECKSUM</code></a>
-        (see <a href="ext:api/org/apache/hadoop/fs/filesystem/getFileChecksum">FileSystem.getFileChecksum</a>)
-    </li>
-    <li><a href="#GETHOMEDIRECTORY"><code>GETHOMEDIRECTORY</code></a>
-        (see <a href="ext:api/org/apache/hadoop/fs/filesystem/getHomeDirectory">FileSystem.getHomeDirectory</a>)
-    </li>
-    <li><a href="#GETDELEGATIONTOKEN"><code>GETDELEGATIONTOKEN</code></a>
-        (see <a href="ext:api/org/apache/hadoop/fs/filesystem/getDelegationToken">FileSystem.getDelegationToken</a>)
-    </li>
-  </ul></li>
-  <li>HTTP PUT
-  <ul>
-    <li><a href="#CREATE"><code>CREATE</code></a>
-        (see <a href="ext:api/org/apache/hadoop/fs/filesystem/create">FileSystem.create</a>)
-    </li>
-    <li><a href="#MKDIRS"><code>MKDIRS</code></a>
-        (see <a href="ext:api/org/apache/hadoop/fs/filesystem/mkdirs">FileSystem.mkdirs</a>)
-    </li>
-    <li><a href="#RENAME"><code>RENAME</code></a>
-        (see <a href="ext:api/org/apache/hadoop/fs/filesystem/rename">FileSystem.rename</a>)
-    </li>
-    <li><a href="#SETREPLICATION"><code>SETREPLICATION</code></a>
-        (see <a href="ext:api/org/apache/hadoop/fs/filesystem/setReplication">FileSystem.setReplication</a>)
-    </li>
-    <li><a href="#SETOWNER"><code>SETOWNER</code></a>
-        (see <a href="ext:api/org/apache/hadoop/fs/filesystem/setOwner">FileSystem.setOwner</a>)
-    </li>
-    <li><a href="#SETPERMISSION"><code>SETPERMISSION</code></a>
-        (see <a href="ext:api/org/apache/hadoop/fs/filesystem/setPermission">FileSystem.setPermission</a>)
-    </li>
-    <li><a href="#SETTIMES"><code>SETTIMES</code></a>
-        (see <a href="ext:api/org/apache/hadoop/fs/filesystem/setTimes">FileSystem.setTimes</a>)
-    </li>
-    <li><a href="#RENEWDELEGATIONTOKEN"><code>RENEWDELEGATIONTOKEN</code></a>
-        (see DistributedFileSystem.renewDelegationToken)
-    </li>
-    <li><a href="#CANCELDELEGATIONTOKEN"><code>CANCELDELEGATIONTOKEN</code></a>
-        (see DistributedFileSystem.cancelDelegationToken)
-    </li>
-  </ul></li>
-  <li>HTTP POST
-  <ul>
-    <li><a href="#APPEND"><code>APPEND</code></a>
-        (see <a href="ext:api/org/apache/hadoop/fs/filesystem/append">FileSystem.append</a>)
-    </li>
-  </ul></li>
-  <li>HTTP DELETE
-  <ul>
-    <li><a href="#DELETE"><code>DELETE</code></a>
-        (see <a href="ext:api/org/apache/hadoop/fs/filesystem/delete">FileSystem.delete</a>)
-    </li>
-  </ul></li>
-</ul>
-
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="FsURIvsHTTP_URL">
-        <title>FileSystem URIs vs HTTP URLs</title>
-<p>
-  The FileSystem scheme of WebHDFS is "<code>webhdfs://</code>".
-  A WebHDFS FileSystem URI has the following format.
-</p>
-<source>
-  webhdfs://&lt;HOST&gt;:&lt;HTTP_PORT&gt;/&lt;PATH&gt;
-</source>
-<p>
-  The above WebHDFS URI corresponds to the below HDFS URI.
-</p>
-<source>
-  hdfs://&lt;HOST&gt;:&lt;RPC_PORT&gt;/&lt;PATH&gt;
-</source>
-<p>
-  In the REST API, the prefix "<code>/webhdfs/v1</code>" is inserted in the path and a query is appended at the end.
-  Therefore, the corresponding HTTP URL has the following format.
-</p>
-<source>
-  http://&lt;HOST&gt;:&lt;HTTP_PORT&gt;/webhdfs/v1/&lt;PATH&gt;?op=...
-</source>
-      </section>
-<!-- ***************************************************************************** -->
-      <section>
-        <title>HDFS Configuration Options</title>
-<p>
-  Below are the HDFS configuration options for WebHDFS.
-</p>
-<table>
-<tr><th>Property Name</th><th>Description</th></tr>
-<tr><td><code>dfs.webhdfs.enabled</code></td>
-<td>Enable/disable WebHDFS in Namenodes and Datanodes
-</td></tr>
-<tr><td><code>dfs.web.authentication.kerberos.principal</code></td>
-<td>The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-    The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
-    HTTP SPNEGO specification.
-</td></tr>
-<tr><td><code>dfs.web.authentication.kerberos.keytab</code></td>
-<td>The Kerberos keytab file with the credentials for the
-    HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-</td></tr>
-</table>
-      </section>
-    </section>
-<!-- ***************************************************************************** -->
-    <section id="Authentication">
-      <title>Authentication</title>
-<p>
-  When security is <em>off</em>, the authenticated user is the username specified in the <code>user.name</code> query parameter.
-  If the <code>user.name</code> parameter is not set,
-  the server may either set the authenticated user to a default web user, if there is any, or return an error response.
-</p>
-<p>
-  When security is <em>on</em>, authentication is performed by either Hadoop delegation token or Kerberos SPNEGO.
-  If a token is set in the <code>delegation</code> query parameter, the authenticated user is the user encoded in the token.
-  If the <code>delegation</code> parameter is not set, the user is authenticated by Kerberos SPNEGO.
-</p>
-<p>
-Below are examples using the <code>curl</code> command tool.
-</p>
-<ol>
-  <li>
-    Authentication when security is off:
-    <source>
-curl -i "http://&lt;HOST&gt;:&lt;PORT&gt;/webhdfs/v1/&lt;PATH&gt;?[user.name=&lt;USER&gt;&amp;]op=..."
-    </source>
-  </li><li>
-    Authentication using Kerberos SPNEGO when security is on:
-    <source>
-curl -i --negotiate -u : "http://&lt;HOST&gt;:&lt;PORT&gt;/webhdfs/v1/&lt;PATH&gt;?op=..."
-    </source>
-  </li><li>
-    Authentication using Hadoop delegation token when security is on:
-    <source>
-curl -i "http://&lt;HOST&gt;:&lt;PORT&gt;/webhdfs/v1/&lt;PATH&gt;?delegation=&lt;TOKEN&gt;&amp;op=..."
-    </source>
-  </li>
-</ol>
-    </section>
-<!-- ***************************************************************************** -->
-    <section id="ProxyUsers">
-      <title>Proxy Users</title>
-<p>
-  When the proxy user feature is enabled, a proxy user <em>P</em> may submit a request on behalf of another user <em>U</em>.
-  The username of <em>U</em> must be specified in the <code>doas</code> query parameter unless a delegation token is presented in authentication.
-  In such case, the information of both users <em>P</em> and <em>U</em> must be encoded in the delegation token.
-</p>
-<ol>
-  <li>
-    A proxy request when security is off:
-    <source>
-curl -i "http://&lt;HOST&gt;:&lt;PORT&gt;/webhdfs/v1/&lt;PATH&gt;?[user.name=&lt;USER&gt;&amp;]doas=&lt;USER&gt;&amp;op=..."
-    </source>
-  </li><li>
-    A proxy request using Kerberos SPNEGO when security is on:
-    <source>
-curl -i --negotiate -u : "http://&lt;HOST&gt;:&lt;PORT&gt;/webhdfs/v1/&lt;PATH&gt;?doas=&lt;USER&gt;&amp;op=..."
-    </source>
-  </li><li>
-    A proxy request using Hadoop delegation token when security is on:
-    <source>
-curl -i "http://&lt;HOST&gt;:&lt;PORT&gt;/webhdfs/v1/&lt;PATH&gt;?delegation=&lt;TOKEN&gt;&amp;op=..."
-    </source>
-  </li>
-</ol>
-    </section>
-<!-- ***************************************************************************** -->
-<!-- ***************************************************************************** -->
-    <section>
-      <title>File and Directory Operations</title>
-      <section id="CREATE">
-        <title>Create and Write to a File</title>
-<ul>
-  <li>Step 1: Submit a HTTP PUT request without automatically following redirects and without sending the file data.
-    <source>
-curl -i -X PUT "http://&lt;HOST&gt;:&lt;PORT&gt;/webhdfs/v1/&lt;PATH&gt;?op=CREATE
-                    [&amp;overwrite=&lt;true|false&gt;][&amp;blocksize=&lt;LONG&gt;][&amp;replication=&lt;SHORT&gt;]
-                    [&amp;permission=&lt;OCTAL&gt;][&amp;buffersize=&lt;INT&gt;]"
-    </source>
-The request is redirected to a datanode where the file data is to be written:
-    <source>
-HTTP/1.1 307 TEMPORARY_REDIRECT
-Location: http://&lt;DATANODE&gt;:&lt;PORT&gt;/webhdfs/v1/&lt;PATH&gt;?op=CREATE...
-Content-Length: 0
-    </source>
-  </li>
-  <li>Step 2: Submit another HTTP PUT request using the URL in the <code>Location</code> header with the file data to be written.
-    <source>
-curl -i -X PUT -T &lt;LOCAL_FILE&gt; "http://&lt;DATANODE&gt;:&lt;PORT&gt;/webhdfs/v1/&lt;PATH&gt;?op=CREATE..."
-    </source>
-The client receives a <code>201 Created</code> response with zero content length
-and the WebHDFS URI of the file in the <code>Location</code> header:
-    <source>
-HTTP/1.1 201 Created
-Location: webhdfs://&lt;HOST&gt;:&lt;PORT&gt;/&lt;PATH&gt;
-Content-Length: 0
-    </source>
-  </li>
-</ul>
-<p>
-  <strong>Note</strong> that the reason of having two-step create/append is
-  for preventing clients to send out data before the redirect.
-  This issue is addressed by the "<code>Expect: 100-continue</code>" header in HTTP/1.1;
-  see <a href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec8.html#sec8.2.3">RFC 2616, Section 8.2.3</a>.
-  Unfortunately, there are software library bugs (e.g. Jetty 6 HTTP server and Java 6 HTTP client),
-  which do not correctly implement "<code>Expect: 100-continue</code>".
-  The two-step create/append is a temporary workaround for the software library bugs.
-</p>
-<p>
-  See also:
-  <a href="#overwrite"><code>overwrite</code></a>,
-  <a href="#blocksize"><code>blocksize</code></a>,
-  <a href="#replication"><code>replication</code></a>,
-  <a href="#permission"><code>permission</code></a>,
-  <a href="#buffersize"><code>buffersize</code></a>,
-  <a href="ext:api/org/apache/hadoop/fs/filesystem/create">FileSystem.create</a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="APPEND">
-        <title>Append to a File</title>
-<ul>
-  <li>Step 1: Submit a HTTP POST request without automatically following redirects and without sending the file data.
-    <source>
-curl -i -X POST "http://&lt;HOST&gt;:&lt;PORT&gt;/webhdfs/v1/&lt;PATH&gt;?op=APPEND[&amp;buffersize=&lt;INT&gt;]"
-    </source>
-The request is redirected to a datanode where the file data is to be appended:
-    <source>
-HTTP/1.1 307 TEMPORARY_REDIRECT
-Location: http://&lt;DATANODE&gt;:&lt;PORT&gt;/webhdfs/v1/&lt;PATH&gt;?op=APPEND...
-Content-Length: 0
-    </source>
-  </li>
-  <li>Step 2: Submit another HTTP POST request using the URL in the <code>Location</code> header with the file data to be appended.
-    <source>
-curl -i -X POST -T &lt;LOCAL_FILE&gt; "http://&lt;DATANODE&gt;:&lt;PORT&gt;/webhdfs/v1/&lt;PATH&gt;?op=APPEND..."
-    </source>
-The client receives a response with zero content length:
-    <source>
-HTTP/1.1 200 OK
-Content-Length: 0
-    </source>
-  </li>
-</ul>
-<p><em>
-  See the note in the previous section for the description of why this operation requires two steps.
-</em></p>
-<p>
-  See also:
-  <a href="#buffersize"><code>buffersize</code></a>,
-  <a href="ext:api/org/apache/hadoop/fs/filesystem/append">FileSystem.append</a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="OPEN">
-        <title>Open and Read a File</title>
-<ul>
-  <li>Submit a HTTP GET request with automatically following redirects.
-    <source>
-curl -i -L "http://&lt;HOST&gt;:&lt;PORT&gt;/webhdfs/v1/&lt;PATH&gt;?op=OPEN
-                    [&amp;offset=&lt;LONG&gt;][&amp;length=&lt;LONG&gt;][&amp;buffersize=&lt;INT&gt;]"
-    </source>
-The request is redirected to a datanode where the file data can be read:
-    <source>
-HTTP/1.1 307 TEMPORARY_REDIRECT
-Location: http://&lt;DATANODE&gt;:&lt;PORT&gt;/webhdfs/v1/&lt;PATH&gt;?op=OPEN...
-Content-Length: 0
-    </source>
-The client follows the redirect to the datanode and receives the file data:
-    <source>
-HTTP/1.1 200 OK
-Content-Type: application/octet-stream
-Content-Length: 22
-
-Hello, webhdfs user!
-    </source>
-  </li>
-</ul>
-<p>
-  See also:
-  <a href="#offset"><code>offset</code></a>,
-  <a href="#length"><code>length</code></a>,
-  <a href="#buffersize"><code>buffersize</code></a>,
-  <a href="ext:api/org/apache/hadoop/fs/filesystem/open">FileSystem.open</a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="MKDIRS">
-        <title>Make a Directory</title>
-<ul>
-  <li>Submit a HTTP PUT request.
-    <source>
-        curl -i -X PUT "http://&lt;HOST&gt;:&lt;PORT&gt;/webhdfs/v1/&lt;PATH&gt;?op=MKDIRS[&amp;permission=&lt;OCTAL&gt;]"
-    </source>
-The client receives a response with a <a href="#boolean"><code>boolean</code> JSON object</a>:
-    <source>
-HTTP/1.1 200 OK
-Content-Type: application/json
-Transfer-Encoding: chunked
-
-{"boolean": true}
-    </source>
-  </li>
-</ul>
-<p>
-  See also:
-  <a href="#permission"><code>permission</code></a>,
-  <a href="ext:api/org/apache/hadoop/fs/filesystem/mkdirs">FileSystem.mkdirs</a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="RENAME">
-        <title>Rename a File/Directory</title>
-<ul>
-  <li>Submit a HTTP PUT request.
-    <source>
-curl -i -X PUT "&lt;HOST&gt;:&lt;PORT&gt;/webhdfs/v1/&lt;PATH&gt;?op=RENAME&amp;destination=&lt;PATH&gt;"
-    </source>
-The client receives a response with a <a href="#boolean"><code>boolean</code> JSON object</a>:
-    <source>
-HTTP/1.1 200 OK
-Content-Type: application/json
-Transfer-Encoding: chunked
-
-{"boolean": true}
-    </source>
-  </li>
-</ul>
-<p>
-  See also:
-  <a href="#destination"><code>destination</code></a>,
-  <a href="ext:api/org/apache/hadoop/fs/filesystem/rename">FileSystem.rename</a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="DELETE">
-        <title>Delete a File/Directory</title>
-<ul>
-  <li>Submit a HTTP DELETE request.
-    <source>
-curl -i -X DELETE "http://&lt;host&gt;:&lt;port&gt;/webhdfs/v1/&lt;path&gt;?op=DELETE
-                              [&amp;recursive=&lt;true|false&gt;]"
-    </source>
-The client receives a response with a <a href="#boolean"><code>boolean</code> JSON object</a>:
-    <source>
-HTTP/1.1 200 OK
-Content-Type: application/json
-Transfer-Encoding: chunked
-
-{"boolean": true}
-    </source>
-  </li>
-</ul>
-<p>
-  See also:
-  <a href="#recursive"><code>recursive</code></a>,
-  <a href="ext:api/org/apache/hadoop/fs/filesystem/delete">FileSystem.delete</a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="GETFILESTATUS">
-        <title>Status of a File/Directory</title>
-<ul>
-  <li>Submit a HTTP GET request.
-    <source>
-curl -i  "http://&lt;HOST&gt;:&lt;PORT&gt;/webhdfs/v1/&lt;PATH&gt;?op=GETFILESTATUS"
-    </source>
-The client receives a response with a <a href="#FileStatus"><code>FileStatus</code> JSON object</a>:
-    <source>
-HTTP/1.1 200 OK
-Content-Type: application/json
-Transfer-Encoding: chunked
-
-{
-  "FileStatus":
-  {
-    "accessTime"      : 0,
-    "blockSize"       : 0,
-    "group"           : "supergroup",
-    "length"          : 0,             //in bytes, zero for directories
-    "modificationTime": 1320173277227,
-    "owner"           : "webuser",
-    "pathSuffix"      : "",
-    "permission"      : "777",
-    "replication"     : 0,
-    "type"            : "DIRECTORY"    //enum {FILE, DIRECTORY, SYMLINK}
-  }
-}
-    </source>
-  </li>
-</ul>
-<p>
-  See also:
-  <a href="ext:api/org/apache/hadoop/fs/filesystem/getFileStatus">FileSystem.getFileStatus</a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="LISTSTATUS">
-        <title>List a Directory</title>
-<ul>
-  <li>Submit a HTTP GET request.
-    <source>
-curl -i  "http://&lt;HOST&gt;:&lt;PORT&gt;/webhdfs/v1/&lt;PATH&gt;?op=LISTSTATUS"
-    </source>
-The client receives a response with a <a href="#FileStatuses"><code>FileStatuses</code> JSON object</a>:
-    <source>
-HTTP/1.1 200 OK
-Content-Type: application/json
-Content-Length: 427
-
-{
-  "FileStatuses":
-  {
-    "FileStatus":
-    [
-      {
-        "accessTime"      : 1320171722771,
-        "blockSize"       : 33554432,
-        "group"           : "supergroup",
-        "length"          : 24930,
-        "modificationTime": 1320171722771,
-        "owner"           : "webuser",
-        "pathSuffix"      : "a.patch",
-        "permission"      : "644",
-        "replication"     : 1,
-        "type"            : "FILE"
-      },
-      {
-        "accessTime"      : 0,
-        "blockSize"       : 0,
-        "group"           : "supergroup",
-        "length"          : 0,
-        "modificationTime": 1320895981256,
-        "owner"           : "szetszwo",
-        "pathSuffix"      : "bar",
-        "permission"      : "711",
-        "replication"     : 0,
-        "type"            : "DIRECTORY"
-      },
-      ...
-    ]
-  }
-}
-    </source>
-  </li>
-</ul>
-<p>
-  See also:
-  <a href="ext:api/org/apache/hadoop/fs/filesystem/listStatus">FileSystem.listStatus</a>
-</p>
-      </section>
-    </section>
-<!-- ***************************************************************************** -->
-<!-- ***************************************************************************** -->
-    <section>
-      <title>Other File System Operations</title>
-      <section id="GETCONTENTSUMMARY">
-        <title>Get Content Summary of a Directory</title>
-<ul>
-  <li>Submit a HTTP GET request.
-    <source>
-curl -i "http://&lt;HOST&gt;:&lt;PORT&gt;/webhdfs/v1/&lt;PATH&gt;?op=GETCONTENTSUMMARY"
-    </source>
-The client receives a response with a <a href="#ContentSummary"><code>ContentSummary</code> JSON object</a>:
-    <source>
-HTTP/1.1 200 OK
-Content-Type: application/json
-Transfer-Encoding: chunked
-
-{
-  "ContentSummary":
-  {
-    "directoryCount": 2,
-    "fileCount"     : 1,
-    "length"        : 24930,
-    "quota"         : -1,
-    "spaceConsumed" : 24930,
-    "spaceQuota"    : -1
-  }
-}
-    </source>
-  </li>
-</ul>
-<p>
-  See also:
-  <a href="ext:api/org/apache/hadoop/fs/filesystem/getContentSummary">FileSystem.getContentSummary</a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="GETFILECHECKSUM">
-        <title>Get File Checksum</title>
-<ul>
-  <li>Submit a HTTP GET request.
-    <source>
-curl -i "http://&lt;HOST&gt;:&lt;PORT&gt;/webhdfs/v1/&lt;PATH&gt;?op=GETFILECHECKSUM"
-    </source>
-The request is redirected to a datanode:
-    <source>
-HTTP/1.1 307 TEMPORARY_REDIRECT
-Location: http://&lt;DATANODE&gt;:&lt;PORT&gt;/webhdfs/v1/&lt;PATH&gt;?op=GETFILECHECKSUM...
-Content-Length: 0
-    </source>
-The client follows the redirect to the datanode and receives a <a href="#FileChecksum"><code>FileChecksum</code> JSON object</a>:
-    <source>
-HTTP/1.1 200 OK
-Content-Type: application/json
-Transfer-Encoding: chunked
-
-{
-  "FileChecksum":
-  {
-    "algorithm": "MD5-of-1MD5-of-512CRC32",
-    "bytes"    : "eadb10de24aa315748930df6e185c0d ...",
-    "length"   : 28
-  }
-}
-    </source>
-  </li>
-</ul>
-<p>
-  See also:
-  <a href="ext:api/org/apache/hadoop/fs/filesystem/getFileChecksum">FileSystem.getFileChecksum</a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="GETHOMEDIRECTORY">
-        <title>Get Home Directory</title>
-<ul>
-  <li>Submit a HTTP GET request.
-    <source>
-curl -i "http://&lt;HOST&gt;:&lt;PORT&gt;/webhdfs/v1/?op=GETHOMEDIRECTORY"
-    </source>
-The client receives a response with a <a href="#Path"><code>Path</code> JSON object</a>:
-    <source>
-HTTP/1.1 200 OK
-Content-Type: application/json
-Transfer-Encoding: chunked
-
-{"Path": "/user/szetszwo"}
-    </source>
-  </li>
-</ul>
-<p>
-  See also:
-  <a href="ext:api/org/apache/hadoop/fs/filesystem/getHomeDirectory">FileSystem.getHomeDirectory</a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="SETPERMISSION">
-        <title>Set Permission</title>
-<ul>
-  <li>Submit a HTTP PUT request.
-    <source>
-curl -i -X PUT "http://&lt;HOST&gt;:&lt;PORT&gt;/webhdfs/v1/&lt;PATH&gt;?op=SETPERMISSION
-                              [&amp;permission=&lt;OCTAL&gt;]"
-    </source>
-The client receives a response with zero content length:
-    <source>
-HTTP/1.1 200 OK
-Content-Length: 0
-    </source>
-  </li>
-</ul>
-<p>
-  See also:
-  <a href="#permission"><code>permission</code></a>,
-  <a href="ext:api/org/apache/hadoop/fs/filesystem/setPermission">FileSystem.setPermission</a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="SETOWNER">
-        <title>Set Owner</title>
-<ul>
-  <li>Submit a HTTP PUT request.
-    <source>
-curl -i -X PUT "http://&lt;HOST&gt;:&lt;PORT&gt;/webhdfs/v1/&lt;PATH&gt;?op=SETOWNER
-                              [&amp;owner=&lt;USER&gt;][&amp;group=&lt;GROUP&gt;]"
-    </source>
-The client receives a response with zero content length:
-    <source>
-HTTP/1.1 200 OK
-Content-Length: 0
-    </source>
-  </li>
-</ul>
-<p>
-  See also:
-  <a href="#owner"><code>owner</code></a>,
-  <a href="#group"><code>group</code></a>,
-  <a href="ext:api/org/apache/hadoop/fs/filesystem/setOwner">FileSystem.setOwner</a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="SETREPLICATION">
-        <title>Set Replication Factor</title>
-<ul>
-  <li>Submit a HTTP PUT request.
-    <source>
-curl -i -X PUT "http://&lt;HOST&gt;:&lt;PORT&gt;/webhdfs/v1/&lt;PATH&gt;?op=SETREPLICATION
-                              [&amp;replication=&lt;SHORT&gt;]"
-    </source>
-The client receives a response with a <a href="#boolean"><code>boolean</code> JSON object</a>:
-    <source>
-HTTP/1.1 200 OK
-Content-Type: application/json
-Transfer-Encoding: chunked
-
-{"boolean": true}
-    </source>
-  </li>
-</ul>
-<p>
-  See also:
-  <a href="#replication"><code>replication</code></a>,
-  <a href="ext:api/org/apache/hadoop/fs/filesystem/setReplication">FileSystem.setReplication</a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="SETTIMES">
-        <title>Set Access or Modification Time</title>
-<ul>
-  <li>Submit a HTTP PUT request.
-    <source>
-curl -i -X PUT "http://&lt;HOST&gt;:&lt;PORT&gt;/webhdfs/v1/&lt;PATH&gt;?op=SETTIMES
-                              [&amp;modificationtime=&lt;TIME&gt;][&amp;accesstime=&lt;TIME&gt;]"
-    </source>
-The client receives a response with zero content length:
-    <source>
-HTTP/1.1 200 OK
-Content-Length: 0
-    </source>
-  </li>
-</ul>
-<p>
-  See also:
-  <a href="#modificationtime"><code>modificationtime</code></a>,
-  <a href="#accesstime"><code>accesstime</code></a>,
-  <a href="ext:api/org/apache/hadoop/fs/filesystem/setTimes">FileSystem.setTimes</a>
-</p>
-      </section>
-    </section>
-<!-- ***************************************************************************** -->
-<!-- ***************************************************************************** -->
-    <section>
-      <title>Delegation Token Operations</title>
-      <section id="GETDELEGATIONTOKEN">
-        <title>Get Delegation Token</title>
-<ul>
-  <li>Submit a HTTP GET request.
-    <source>
-curl -i "http://&lt;HOST&gt;:&lt;PORT&gt;/webhdfs/v1/?op=GETDELEGATIONTOKEN&amp;renewer=&lt;USER&gt;"
-    </source>
-The client receives a response with a <a href="#Token"><code>Token</code> JSON object</a>:
-    <source>
-HTTP/1.1 200 OK
-Content-Type: application/json
-Transfer-Encoding: chunked
-
-{
-  "Token":
-  {
-    "urlString": "JQAIaG9y..."
-  }
-}
-    </source>
-  </li>
-</ul>
-<p>
-  See also:
-  <a href="#renewer"><code>renewer</code></a>,
-  <a href="ext:api/org/apache/hadoop/fs/filesystem/getDelegationToken">FileSystem.getDelegationToken</a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="RENEWDELEGATIONTOKEN">
-        <title>Renew Delegation Token</title>
-<ul>
-  <li>Submit a HTTP PUT request.
-    <source>
-curl -i -X PUT "http://&lt;HOST&gt;:&lt;PORT&gt;/webhdfs/v1/?op=RENEWDELEGATIONTOKEN&amp;token=&lt;TOKEN&gt;"
-    </source>
-The client receives a response with a <a href="#long"><code>long</code> JSON object</a>:
-    <source>
-HTTP/1.1 200 OK
-Content-Type: application/json
-Transfer-Encoding: chunked
-
-{"long": 1320962673997}           //the new expiration time
-    </source>
-  </li>
-</ul>
-<p>
-  See also:
-  <a href="#token"><code>token</code></a>,
-  DistributedFileSystem.renewDelegationToken
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="CANCELDELEGATIONTOKEN">
-        <title>Cancel Delegation Token</title>
-<ul>
-  <li>Submit a HTTP PUT request.
-    <source>
-curl -i -X PUT "http://&lt;HOST&gt;:&lt;PORT&gt;/webhdfs/v1/?op=CANCELDELEGATIONTOKEN&amp;token=&lt;TOKEN&gt;"
-    </source>
-The client receives a response with zero content length:
-    <source>
-HTTP/1.1 200 OK
-Content-Length: 0
-    </source>
-  </li>
-</ul>
-<p>
-  See also:
-  <a href="#token"><code>token</code></a>,
-  DistributedFileSystem.cancelDelegationToken
-</p>
-      </section>
-    </section>
-<!-- ***************************************************************************** -->
-<!-- ***************************************************************************** -->
-    <section>
-      <title>Error Responses</title>
-<p>
-  When an operation fails, the server may throw an exception.
-  The JSON schema of error responses is defined in <a href="#RemoteException"><code>RemoteException</code> JSON schema</a>.
-  The table below shows the mapping from exceptions to HTTP response codes.
-</p>
-      <section>
-        <title>HTTP Response Codes</title>
-<table>
-<tr><th>Exceptions</th><th>HTTP Response Codes</th></tr>
-<tr><td><code>IllegalArgumentException     </code></td><td><code>400 Bad Request          </code></td></tr>
-<tr><td><code>UnsupportedOperationException</code></td><td><code>400 Bad Request          </code></td></tr>
-<tr><td><code>SecurityException            </code></td><td><code>401 Unauthorized         </code></td></tr>
-<tr><td><code>IOException                  </code></td><td><code>403 Forbidden            </code></td></tr>
-<tr><td><code>FileNotFoundException        </code></td><td><code>404 Not Found            </code></td></tr>
-<tr><td><code>RumtimeException             </code></td><td><code>500 Internal Server Error</code></td></tr>
-</table>
-<p>
-  Below are examples of exception responses.
-</p>
-<!-- ***************************************************************************** -->
-        <section>
-          <title>Illegal Argument Exception</title>
-<source>
-HTTP/1.1 400 Bad Request
-Content-Type: application/json
-Transfer-Encoding: chunked
-
-{
-  "RemoteException":
-  {
-    "exception"    : "IllegalArgumentException",
-    "javaClassName": "java.lang.IllegalArgumentException",
-    "message"      : "Invalid value for webhdfs parameter \"permission\": ..."
-  }
-}
-</source>
-        </section>
-<!-- ***************************************************************************** -->
-        <section>
-          <title>Security Exception</title>
-<source>
-HTTP/1.1 401 Unauthorized
-Content-Type: application/json
-Transfer-Encoding: chunked
-
-{
-  "RemoteException":
-  {
-    "exception"    : "SecurityException",
-    "javaClassName": "java.lang.SecurityException",
-    "message"      : "Failed to obtain user group information: ..."
-  }
-}
-</source>
-        </section>
-<!-- ***************************************************************************** -->
-        <section>
-          <title>Access Control Exception</title>
-<source>
-HTTP/1.1 403 Forbidden
-Content-Type: application/json
-Transfer-Encoding: chunked
-
-{
-  "RemoteException":
-  {
-    "exception"    : "AccessControlException",
-    "javaClassName": "org.apache.hadoop.security.AccessControlException",
-    "message"      : "Permission denied: ..."
-  }
-}
-</source>
-        </section>
-<!-- ***************************************************************************** -->
-        <section>
-          <title>File Not Found Exception</title>
-<source>
-HTTP/1.1 404 Not Found
-Content-Type: application/json
-Transfer-Encoding: chunked
-
-{
-  "RemoteException":
-  {
-    "exception"    : "FileNotFoundException",
-    "javaClassName": "java.io.FileNotFoundException",
-    "message"      : "File does not exist: /foo/a.patch"
-  }
-}
-</source>
-        </section>
-      </section>
-    </section>
-<!-- ***************************************************************************** -->
-<!-- ***************************************************************************** -->
-    <section>
-      <title>JSON Schemas</title>
-<p>
-All operations, except for <a href="#OPEN"><code>OPEN</code></a>,
-either return a zero-length response or a JSON response .
-For <a href="#OPEN"><code>OPEN</code></a>, the response is an octet-stream.
-The JSON schemas are shown below.
-See <a href="http://tools.ietf.org/id/draft-zyp-json-schema-03.html">draft-zyp-json-schema-03</a>
-for the syntax definitions of the JSON schemas.
-</p>
-      <section id="boolean">
-        <title>Boolean JSON Schema</title>
-<source>
-{
-  "name"      : "boolean",
-  "properties":
-  {
-    "boolean":
-    {
-      "description": "A boolean value",
-      "type"       : "boolean",
-      "required"   : true
-    }
-  }
-}
-</source>
-<p>
-  See also:
-  <a href="#MKDIRS"><code>MKDIRS</code></a>,
-  <a href="#RENAME"><code>RENAME</code></a>,
-  <a href="#DELETE"><code>DELETE</code></a>,
-  <a href="#SETREPLICATION"><code>SETREPLICATION</code></a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="ContentSummary">
-        <title>ContentSummary JSON Schema</title>
-<source>
-{
-  "name"      : "ContentSummary",
-  "properties":
-  {
-    "ContentSummary":
-    {
-      "type"      : "object",
-      "properties":
-      {
-        "directoryCount":
-        {
-          "description": "The number of directories.",
-          "type"       : "integer",
-          "required"   : true
-        },
-        "fileCount":
-        {
-          "description": "The number of files.",
-          "type"       : "integer",
-          "required"   : true
-        },
-        "length":
-        {
-          "description": "The number of bytes used by the content.",
-          "type"       : "integer",
-          "required"   : true
-        },
-        "quota":
-        {
-          "description": "The namespace quota of this directory.",
-          "type"       : "integer",
-          "required"   : true
-        },
-        "spaceConsumed":
-        {
-          "description": "The disk space consumed by the content.",
-          "type"       : "integer",
-          "required"   : true
-        },
-        "spaceQuota":
-        {
-          "description": "The disk space quota.",
-          "type"       : "integer",
-          "required"   : true
-        }
-      }
-    }
-  }
-}
-</source>
-<p>
-  See also:
-  <a href="#GETCONTENTSUMMARY"><code>GETCONTENTSUMMARY</code></a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="FileChecksum">
-        <title>FileChecksum JSON Schema</title>
-<source>
-{
-  "name"      : "FileChecksum",
-  "properties":
-  {
-    "FileChecksum":
-    {
-      "type"      : "object",
-      "properties":
-      {
-        "algorithm":
-        {
-          "description": "The name of the checksum algorithm.",
-          "type"       : "string",
-          "required"   : true
-        },
-        "bytes":
-        {
-          "description": "The byte sequence of the checksum in hexadecimal.",
-          "type"       : "string",
-          "required"   : true
-        },
-        "length":
-        {
-          "description": "The length of the bytes (not the length of the string).",
-          "type"       : "integer",
-          "required"   : true
-        }
-      }
-    }
-  }
-}
-</source>
-<p>
-  See also:
-  <a href="#GETFILECHECKSUM"><code>GETFILECHECKSUM</code></a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="FileStatus">
-        <title>FileStatus JSON Schema</title>
-<source>
-{
-  "name"      : "FileStatus",
-  "properties":
-  {
-    "FileStatus": fileStatusProperties      //See <a href="#fileStatusProperties">FileStatus Properties</a>
-  }
-}
-</source>
-<p>
-  See also:
-  <a href="#GETFILESTATUS"><code>GETFILESTATUS</code></a>,
-  <a href="ext:api/org/apache/hadoop/fs/FileStatus">FileStatus</a>
-</p>
-      <section id="fileStatusProperties">
-        <title>FileStatus Properties</title>
-<p>
-  JavaScript syntax is used to define <code>fileStatusProperties</code>
-  so that it can be referred in both <code>FileStatus</code> and <code>FileStatuses</code> JSON schemas.
-</p>
-<source>
-var fileStatusProperties =
-{
-  "type"      : "object",
-  "properties":
-  {
-    "accessTime":
-    {
-      "description": "The access time.",
-      "type"       : "integer",
-      "required"   : true
-    },
-    "blockSize":
-    {
-      "description": "The block size of a file.",
-      "type"       : "integer",
-      "required"   : true
-    },
-    "group":
-    {
-      "description": "The group owner.",
-      "type"       : "string",
-      "required"   : true
-    },
-    "length":
-    {
-      "description": "The number of bytes in a file.",
-      "type"       : "integer",
-      "required"   : true
-    },
-    "modificationTime":
-    {
-      "description": "The modification time.",
-      "type"       : "integer",
-      "required"   : true
-    },
-    "owner":
-    {
-      "description": "The user who is the owner.",
-      "type"       : "string",
-      "required"   : true
-    },
-    "pathSuffix":
-    {
-      "description": "The path suffix.",
-      "type"       : "string",
-      "required"   : true
-    },
-    "permission":
-    {
-      "description": "The permission represented as a octal string.",
-      "type"       : "string",
-      "required"   : true
-    },
-    "replication":
-    {
-      "description": "The number of replication of a file.",
-      "type"       : "integer",
-      "required"   : true
-    },
-   "symlink":                                         //an optional property
-    {
-      "description": "The link target of a symlink.",
-      "type"       : "string"
-    },
-   "type":
-    {
-      "description": "The type of the path object.",
-      "enum"       : ["FILE", "DIRECTORY", "SYMLINK"],
-      "required"   : true
-    }
-  }
-};
-</source>
-        </section>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="FileStatuses">
-        <title>FileStatuses JSON Schema</title>
-<p>
-  A <code>FileStatuses</code> JSON object represents an array of <code>FileStatus</code> JSON objects.
-</p>
-<source>
-{
-  "name"      : "FileStatuses",
-  "properties":
-  {
-    "FileStatuses":
-    {
-      "type"      : "object",
-      "properties":
-      {
-        "FileStatus":
-        {
-          "description": "An array of FileStatus",
-          "type"       : "array",
-          "items"      : fileStatusProperties      //See <a href="#fileStatusProperties">FileStatus Properties</a>
-        }
-      }
-    }
-  }
-}
-</source>
-<p>
-  See also:
-  <a href="#LISTSTATUS"><code>LISTSTATUS</code></a>,
-  <a href="ext:api/org/apache/hadoop/fs/FileStatus">FileStatus</a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="long">
-        <title>Long JSON Schema</title>
-<source>
-{
-  "name"      : "long",
-  "properties":
-  {
-    "long":
-    {
-      "description": "A long integer value",
-      "type"       : "integer",
-      "required"   : true
-    }
-  }
-}
-</source>
-<p>
-  See also:
-  <a href="#RENEWDELEGATIONTOKEN"><code>RENEWDELEGATIONTOKEN</code></a>,
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="Path">
-        <title>Path JSON Schema</title>
-<source>
-{
-  "name"      : "Path",
-  "properties":
-  {
-    "Path":
-    {
-      "description": "The string representation a Path.",
-      "type"       : "string",
-      "required"   : true
-    }
-  }
-}
-</source>
-<p>
-  See also:
-  <a href="#GETHOMEDIRECTORY"><code>GETHOMEDIRECTORY</code></a>,
-  <a href="ext:api/org/apache/hadoop/fs/Path">Path</a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="RemoteException">
-        <title>RemoteException JSON Schema</title>
-<source>
-{
-  "name"      : "RemoteException",
-  "properties":
-  {
-    "RemoteException":
-    {
-      "type"      : "object",
-      "properties":
-      {
-        "exception":
-        {
-          "description": "Name of the exception",
-          "type"       : "string",
-          "required"   : true
-        },
-        "message":
-        {
-          "description": "Exception message",
-          "type"       : "string",
-          "required"   : true
-        },
-        "javaClassName":                                     //an optional property
-        {
-          "description": "Java class name of the exception",
-          "type"       : "string",
-        }
-      }
-    }
-  }
-}
-</source>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="Token">
-        <title>Token JSON Schema</title>
-<source>
-{
-  "name"      : "Token",
-  "properties":
-  {
-    "Token":
-    {
-      "type"      : "object",
-      "properties":
-      {
-        "urlString":
-        {
-          "description": "A delegation token encoded as a URL safe string.",
-          "type"       : "string",
-          "required"   : true
-        }
-      }
-    }
-  }
-}
-</source>
-<p>
-  See also:
-  <a href="#GETDELEGATIONTOKEN"><code>GETDELEGATIONTOKEN</code></a>,
-  the note in <a href="#delegation">Delegation</a>.
-</p>
-      </section>
-    </section>
-<!-- ***************************************************************************** -->
-<!-- ***************************************************************************** -->
-    <section id="ParameterDictionary">
-      <title>HTTP Query Parameter Dictionary</title>
-      <section id="accesstime">
-        <title>Access Time</title>
-<table>
-  <tr><td>Name</td><td><code>accesstime</code></td></tr>
-  <tr><td>Description</td><td>The access time of a file/directory.</td></tr>
-  <tr><td>Type</td><td>long</td></tr>
-  <tr><td>Default Value</td><td>-1 (means keeping it unchanged)</td></tr>
-  <tr><td>Valid Values</td><td>-1 or a timestamp</td></tr>
-  <tr><td>Syntax</td><td>Any integer.</td></tr>
-</table>
-<p>
-  See also:
-  <a href="#SETTIMES"><code>SETTIMES</code></a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="blocksize">
-        <title>Block Size</title>
-<table>
-  <tr><td>Name</td><td><code>blocksize</code></td></tr>
-  <tr><td>Description</td><td>The block size of a file.</td></tr>
-  <tr><td>Type</td><td>long</td></tr>
-  <tr><td>Default Value</td><td>Specified in the configuration.</td></tr>
-  <tr><td>Valid Values</td><td>&gt; 0</td></tr>
-  <tr><td>Syntax</td><td>Any integer.</td></tr>
-</table>
-<p>
-  See also:
-  <a href="#CREATE"><code>CREATE</code></a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="buffersize">
-        <title>Buffer Size</title>
-<table>
-  <tr><td>Name</td><td><code>buffersize</code></td></tr>
-  <tr><td>Description</td><td>The size of the buffer used in transferring data.</td></tr>
-  <tr><td>Type</td><td>int</td></tr>
-  <tr><td>Default Value</td><td>Specified in the configuration.</td></tr>
-  <tr><td>Valid Values</td><td>&gt; 0</td></tr>
-  <tr><td>Syntax</td><td>Any integer.</td></tr>
-</table>
-<p>
-  See also:
-  <a href="#CREATE"><code>CREATE</code></a>,
-  <a href="#APPEND"><code>APPEND</code></a>,
-  <a href="#OPEN"><code>OPEN</code></a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="delegation">
-        <title>Delegation</title>
-<table>
-  <tr><td>Name</td><td><code>delegation</code></td></tr>
-  <tr><td>Description</td><td>The delegation token used for authentication.</td></tr>
-  <tr><td>Type</td><td>String</td></tr>
-  <tr><td>Default Value</td><td>&lt;empty&gt;</td></tr>
-  <tr><td>Valid Values</td><td>An encoded token.</td></tr>
-  <tr><td>Syntax</td><td>See the note below.</td></tr>
-</table>
-<p>
-  <strong>Note</strong> that delegation tokens are encoded as a URL safe string;
-  see <code>encodeToUrlString()</code>
-  and <code>decodeFromUrlString(String)</code>
-  in <code>org.apache.hadoop.security.token.Token</code> for the details of the encoding.
-</p>
-<p>
-  See also:
-  <a href="#Authentication">Authentication</a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="destination">
-        <title>Destination</title>
-<table>
-  <tr><td>Name</td><td><code>destination</code></td></tr>
-  <tr><td>Description</td><td>The destination path used in <a href="#RENAME">RENAME</a>.</td></tr>
-  <tr><td>Type</td><td>Path</td></tr>
-  <tr><td>Default Value</td><td>&lt;empty&gt; (an invalid path)</td></tr>
-  <tr><td>Valid Values</td><td>An absolute FileSystem path without scheme and authority.</td></tr>
-  <tr><td>Syntax</td><td>Any path.</td></tr>
-</table>
-<p>
-  See also:
-  <a href="#RENAME"><code>RENAME</code></a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="doas">
-        <title>Do As</title>
-<table>
-  <tr><td>Name</td><td><code>doas</code></td></tr>
-  <tr><td>Description</td><td>Allowing a proxy user to do as another user.</td></tr>
-  <tr><td>Type</td><td>String</td></tr>
-  <tr><td>Default Value</td><td>null</td></tr>
-  <tr><td>Valid Values</td><td>Any valid username.</td></tr>
-  <tr><td>Syntax</td><td>Any string.</td></tr>
-</table>
-<p>
-  See also:
-  <a href="#ProxyUsers">Proxy Users</a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="group">
-        <title>Group</title>
-<table>
-  <tr><td>Name</td><td><code>group</code></td></tr>
-  <tr><td>Description</td><td>The name of a group.</td></tr>
-  <tr><td>Type</td><td>String</td></tr>
-  <tr><td>Default Value</td><td>&lt;empty&gt; (means keeping it unchanged)</td></tr>
-  <tr><td>Valid Values</td><td>Any valid group name.</td></tr>
-  <tr><td>Syntax</td><td>Any string.</td></tr>
-</table>
-<p>
-  See also:
-  <a href="#SETOWNER"><code>SETOWNER</code></a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="length">
-        <title>Length</title>
-<table>
-  <tr><td>Name</td><td><code>length</code></td></tr>
-  <tr><td>Description</td><td>The number of bytes to be processed.</td></tr>
-  <tr><td>Type</td><td>long</td></tr>
-  <tr><td>Default Value</td><td>null (means the entire file)</td></tr>
-  <tr><td>Valid Values</td><td>&gt;= 0 or null</td></tr>
-  <tr><td>Syntax</td><td>Any integer.</td></tr>
-</table>
-<p>
-  See also:
-  <a href="#OPEN"><code>OPEN</code></a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="modificationtime">
-        <title>Modification Time</title>
-<table>
-  <tr><td>Name</td><td><code>modificationtime</code></td></tr>
-  <tr><td>Description</td><td>The modification time of a file/directory.</td></tr>
-  <tr><td>Type</td><td>long</td></tr>
-  <tr><td>Default Value</td><td>-1 (means keeping it unchanged)</td></tr>
-  <tr><td>Valid Values</td><td>-1 or a timestamp</td></tr>
-  <tr><td>Syntax</td><td>Any integer.</td></tr>
-</table>
-<p>
-  See also:
-  <a href="#SETTIMES"><code>SETTIMES</code></a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="offset">
-        <title>Offset</title>
-<table>
-  <tr><td>Name</td><td><code>offset</code></td></tr>
-  <tr><td>Description</td><td>The starting byte position.</td></tr>
-  <tr><td>Type</td><td>long</td></tr>
-  <tr><td>Default Value</td><td>0</td></tr>
-  <tr><td>Valid Values</td><td>&gt;= 0</td></tr>
-  <tr><td>Syntax</td><td>Any integer.</td></tr>
-</table>
-<p>
-  See also:
-  <a href="#OPEN"><code>OPEN</code></a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="op">
-        <title>Op</title>
-<table>
-  <tr><td>Name</td><td><code>op</code></td></tr>
-  <tr><td>Description</td><td>The name of the operation to be executed.</td></tr>
-  <tr><td>Type</td><td>enum</td></tr>
-  <tr><td>Default Value</td><td>null (an invalid value)</td></tr>
-  <tr><td>Valid Values</td><td>Any valid operation name.</td></tr>
-  <tr><td>Syntax</td><td>Any string.</td></tr>
-</table>
-<p>
-  See also:
-  <a href="#Operations">Operations</a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="overwrite">
-        <title>Overwrite</title>
-<table>
-  <tr><td>Name</td><td><code>overwrite</code></td></tr>
-  <tr><td>Description</td><td>If a file already exists, should it be overwritten?</td></tr>
-  <tr><td>Type</td><td>boolean</td></tr>
-  <tr><td>Default Value</td><td>false</td></tr>
-  <tr><td>Valid Values</td><td>true | false</td></tr>
-  <tr><td>Syntax</td><td>true | false</td></tr>
-</table>
-<p>
-  See also:
-  <a href="#CREATE"><code>CREATE</code></a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="owner">
-        <title>Owner</title>
-<table>
-  <tr><td>Name</td><td><code>owner</code></td></tr>
-  <tr><td>Description</td><td>The username who is the owner of a file/directory.</td></tr>
-  <tr><td>Type</td><td>String</td></tr>
-  <tr><td>Default Value</td><td>&lt;empty&gt; (means keeping it unchanged)</td></tr>
-  <tr><td>Valid Values</td><td>Any valid username.</td></tr>
-  <tr><td>Syntax</td><td>Any string.</td></tr>
-</table>
-<p>
-  See also:
-  <a href="#SETOWNER"><code>SETOWNER</code></a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="permission">
-        <title>Permission</title>
-<table>
-  <tr><td>Name</td><td><code>permission</code></td></tr>
-  <tr><td>Description</td><td>The permission of a file/directory.</td></tr>
-  <tr><td>Type</td><td>Octal</td></tr>
-  <tr><td>Default Value</td><td>755</td></tr>
-  <tr><td>Valid Values</td><td>0 - 1777</td></tr>
-  <tr><td>Syntax</td><td>Any radix-8 integer (leading zeros may be omitted.)</td></tr>
-</table>
-<p>
-  See also:
-  <a href="#CREATE"><code>CREATE</code></a>,
-  <a href="#MKDIRS"><code>MKDIRS</code></a>,
-  <a href="#SETPERMISSION"><code>SETPERMISSION</code></a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="recursive">
-        <title>Recursive</title>
-<table>
-  <tr><td>Name</td><td><code>recursive</code></td></tr>
-  <tr><td>Description</td><td>Should the operation act on the content in the subdirectories?</td></tr>
-  <tr><td>Type</td><td>boolean</td></tr>
-  <tr><td>Default Value</td><td>false</td></tr>
-  <tr><td>Valid Values</td><td>true | false</td></tr>
-  <tr><td>Syntax</td><td>true | false</td></tr>
-</table>
-<p>
-  See also:
-  <a href="#RENAME"><code>RENAME</code></a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="renewer">
-        <title>Renewer</title>
-<table>
-  <tr><td>Name</td><td><code>renewer</code></td></tr>
-  <tr><td>Description</td><td>The username of the renewer of a delegation token.</td></tr>
-  <tr><td>Type</td><td>String</td></tr>
-  <tr><td>Default Value</td><td>&lt;empty&gt; (means the current user)</td></tr>
-  <tr><td>Valid Values</td><td>Any valid username.</td></tr>
-  <tr><td>Syntax</td><td>Any string.</td></tr>
-</table>
-<p>
-  See also:
-  <a href="#GETDELEGATIONTOKEN"><code>GETDELEGATIONTOKEN</code></a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="replication">
-        <title>Replication</title>
-<table>
-  <tr><td>Name</td><td><code>replication</code></td></tr>
-  <tr><td>Description</td><td>The number of replications of a file.</td></tr>
-  <tr><td>Type</td><td>short</td></tr>
-  <tr><td>Default Value</td><td>Specified in the configuration.</td></tr>
-  <tr><td>Valid Values</td><td>&gt; 0</td></tr>
-  <tr><td>Syntax</td><td>Any integer.</td></tr>
-</table>
-<p>
-  See also:
-  <a href="#CREATE"><code>CREATE</code></a>,
-  <a href="#SETREPLICATION"><code>SETREPLICATION</code></a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="token">
-        <title>Token</title>
-<table>
-  <tr><td>Name</td><td><code>token</code></td></tr>
-  <tr><td>Description</td><td>The delegation token used for the operation.</td></tr>
-  <tr><td>Type</td><td>String</td></tr>
-  <tr><td>Default Value</td><td>&lt;empty&gt;</td></tr>
-  <tr><td>Valid Values</td><td>An encoded token.</td></tr>
-  <tr><td>Syntax</td><td>See the note in <a href="#delegation">Delegation</a>.</td></tr>
-</table>
-<p>
-  See also:
-  <a href="#RENEWDELEGATIONTOKEN"><code>RENEWDELEGATIONTOKEN</code></a>,
-  <a href="#CANCELDELEGATIONTOKEN"><code>CANCELDELEGATIONTOKEN</code></a>
-</p>
-      </section>
-<!-- ***************************************************************************** -->
-      <section id="user.name">
-        <title>Username</title>
-<table>
-  <tr><td>Name</td><td><code>user.name</code></td></tr>
-  <tr><td>Description</td><td>The authenticated user; see <a href="#Authentication">Authentication</a>.</td></tr>
-  <tr><td>Type</td><td>String</td></tr>
-  <tr><td>Default Value</td><td>null</td></tr>
-  <tr><td>Valid Values</td><td>Any valid username.</td></tr>
-  <tr><td>Syntax</td><td>Any string.</td></tr>
-</table>
-<p>
-  See also:
-  <a href="#Authentication">Authentication</a>
-</p>
-      </section>
-    </section>
-  </body>
-</document>
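
The JSON schemas and HTTP query parameters above describe ordinary WebHDFS REST calls. A minimal sketch of one round trip, assuming WebHDFS is enabled and the NameNode HTTP address is localhost:50070; the path, username and field values are made up for illustration, and the body follows the FileStatus schema:

  curl -i "http://localhost:50070/webhdfs/v1/user/hadoop/file1?op=GETFILESTATUS&user.name=hadoop"

  HTTP/1.1 200 OK
  Content-Type: application/json

  {
    "FileStatus":
    {
      "accessTime"      : 1320171722771,
      "blockSize"       : 134217728,
      "group"           : "supergroup",
      "length"          : 24930,
      "modificationTime": 1320171722771,
      "owner"           : "hadoop",
      "pathSuffix"      : "",
      "permission"      : "644",
      "replication"     : 1,
      "type"            : "FILE"
    }
  }

The op and user.name query strings are the parameters defined in the dictionary above; error responses come back as a RemoteException object rather than the requested schema.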

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee7beda6/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/FI-framework.gif
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/FI-framework.gif b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/FI-framework.gif
deleted file mode 100644
index 94ccd83..0000000
Binary files a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/FI-framework.gif and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee7beda6/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/FI-framework.odg
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/FI-framework.odg b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/FI-framework.odg
deleted file mode 100644
index 7a5ba85..0000000
Binary files a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/FI-framework.odg and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee7beda6/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/architecture.gif
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/architecture.gif b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/architecture.gif
deleted file mode 100644
index 8d84a23..0000000
Binary files a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/architecture.gif and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee7beda6/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/core-logo.gif
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/core-logo.gif b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/core-logo.gif
deleted file mode 100644
index 57879bb..0000000
Binary files a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/core-logo.gif and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee7beda6/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/favicon.ico
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/favicon.ico b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/favicon.ico
deleted file mode 100644
index 161bcf7..0000000
Binary files a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/favicon.ico and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee7beda6/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hadoop-logo-big.jpg
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hadoop-logo-big.jpg b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hadoop-logo-big.jpg
deleted file mode 100644
index 0c6996c..0000000
Binary files a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hadoop-logo-big.jpg and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee7beda6/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hadoop-logo.jpg
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hadoop-logo.jpg b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hadoop-logo.jpg
deleted file mode 100644
index 809525d..0000000
Binary files a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hadoop-logo.jpg and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee7beda6/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/request-identify.jpg
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/request-identify.jpg b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/request-identify.jpg
deleted file mode 100644
index 504cbaf..0000000
Binary files a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/request-identify.jpg and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee7beda6/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/skinconf.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/skinconf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/skinconf.xml
deleted file mode 100644
index cfb2010..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/skinconf.xml
+++ /dev/null
@@ -1,366 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<!--
-Skin configuration file. This file contains details of your project,
-which will be used to configure the chosen Forrest skin.
--->
-
-<!DOCTYPE skinconfig PUBLIC "-//APACHE//DTD Skin Configuration V0.6-3//EN" "http://forrest.apache.org/dtd/skinconfig-v06-3.dtd">
-<skinconfig>
-  <!-- To enable lucene search add provider="lucene" (default is google).
-    Add box-location="alt" to move the search box to an alternate location
-    (if the skin supports it) and box-location="all" to show it in all
-    available locations on the page.  Remove the <search> element to show
-    no search box. @domain will enable sitesearch for the specific domain with google.
-    In other words google will search the @domain for the query string.
-
-  -->
-  <search name="Lucene" domain="hadoop.apache.org" provider="google"/>
-
-  <!-- Disable the print link? If enabled, invalid HTML 4.0.1 -->
-  <disable-print-link>true</disable-print-link>  
-  <!-- Disable the PDF link? -->
-  <disable-pdf-link>false</disable-pdf-link>
-  <!-- Disable the POD link? -->
-  <disable-pod-link>true</disable-pod-link>
-  <!-- Disable the Text link? FIXME: NOT YET IMPLEMENTED. -->
-  <disable-txt-link>true</disable-txt-link>
-  <!-- Disable the xml source link? -->
-  <!-- The xml source link makes it possible to access the xml rendition
-    of the source from the html page, and to have it generated statically.
-    This can be used to enable other sites and services to reuse the
-    xml format for their uses. Keep this disabled if you don't want other
-    sites to easily reuse your pages.-->
-  <disable-xml-link>true</disable-xml-link>
-
-  <!-- Disable navigation icons on all external links? -->
-  <disable-external-link-image>true</disable-external-link-image>
-
-  <!-- Disable w3c compliance links? 
-    Use e.g. align="center" to move the compliance links logos to 
-    an alternate location default is left.
-    (if the skin supports it) -->
-  <disable-compliance-links>true</disable-compliance-links>
-
-  <!-- Render mailto: links unrecognisable by spam harvesters? -->
-  <obfuscate-mail-links>false</obfuscate-mail-links>
-
-  <!-- Disable the javascript facility to change the font size -->
-  <disable-font-script>true</disable-font-script>
-
-  <!-- project logo -->
-  <project-name>Hadoop</project-name>
-  <project-description>Scalable Computing Platform</project-description>
-  <project-url>http://hadoop.apache.org/hdfs/</project-url>
-  <project-logo>images/hdfs-logo.jpg</project-logo>
-
-  <!-- group logo -->
-  <group-name>Hadoop</group-name>
-  <group-description>Apache Hadoop</group-description>
-  <group-url>http://hadoop.apache.org/</group-url>
-  <group-logo>images/hadoop-logo.jpg</group-logo>
-
-  <!-- optional host logo (e.g. sourceforge logo)
-       default skin: renders it at the bottom-left corner -->
-  <host-url></host-url>
-  <host-logo></host-logo>
-
-  <!-- relative url of a favicon file, normally favicon.ico -->
-  <favicon-url>images/favicon.ico</favicon-url>
-
-  <!-- The following are used to construct a copyright statement -->
-  <year>2010</year>
-  <vendor>The Apache Software Foundation.</vendor>
-  <copyright-link>http://www.apache.org/licenses/</copyright-link>
-
-  <!-- Some skins use this to form a 'breadcrumb trail' of links.
-    Use location="alt" to move the trail to an alternate location
-    (if the skin supports it).
-	  Omit the location attribute to display the trail in the default location.
-	  Use location="none" to not display the trail (if the skin supports it).
-    For some skins just set the attributes to blank.
-  -->
-  <trail>
-    <link1 name="Apache" href="http://www.apache.org/"/>
-    <link2 name="Hadoop" href="http://hadoop.apache.org/"/>
-    <link3 name="Core" href="http://hadoop.apache.org/core/"/>
-  </trail>
-
-  <!-- Configure the TOC, i.e. the Table of Contents.
-  @max-depth
-   how many "section" levels need to be included in the
-   generated Table of Contents (TOC). 
-  @min-sections
-   Minimum required to create a TOC.
-  @location ("page","menu","page,menu", "none")
-   Where to show the TOC.
-  -->
-  <toc max-depth="2" min-sections="1" location="page"/>
-
-  <!-- Heading types can be clean|underlined|boxed  -->
-  <headings type="clean"/>
-  
-  <!-- The optional feedback element will be used to construct a
-    feedback link in the footer with the page pathname appended:
-    <a href="@href">{@to}</a>
-  <feedback to="webmaster@foo.com"
-    href="mailto:webmaster@foo.com?subject=Feedback&#160;" >
-    Send feedback about the website to:
-  </feedback>
-    -->
-  <!--
-    extra-css - here you can define custom css-elements that are 
-    a. overriding the fallback elements or 
-    b. adding the css definition from new elements that you may have 
-       used in your documentation.
-    -->
-  <extra-css>
-    <!--Example of b. 
-        To define the css definition of a new element that you may have used
-        in the class attribute of a <p> node. 
-        e.g. <p class="quote"/>
-    -->
-    p.quote {
-      margin-left: 2em;
-      padding: .5em;
-      background-color: #f0f0f0;
-      font-family: monospace;
-    }
-    
-    <!--Headers -->
-	#content h1 {
-	  margin-bottom: .5em;
-	  font-size: 185%; color: black;
-	  font-family: arial;
-	}  
-    h2, .h3 { font-size: 175%; color: black; font-family: arial; }
-	h3, .h4 { font-size: 135%; color: black; font-family: arial; margin-bottom: 0.5em; }
-	h4, .h5 { font-size: 125%; color: black;  font-style: italic; font-weight: bold; font-family: arial; }
-	h5, h6 { font-size: 110%; color: #363636; font-weight: bold; }    
-   
-   <!--Code Background -->
-    pre.code {
-      margin-left: 0em;
-      padding: 0.5em;
-      background-color: rgb(241,239,231);
-      font-family: monospace;
-    }   
-    
-  </extra-css>
-
-  <colors>
-  <!-- These values are used for the generated CSS files. -->
-
-  <!-- Krysalis -->
-<!--
-    <color name="header"    value="#FFFFFF"/>
-
-    <color name="tab-selected" value="#a5b6c6" link="#000000" vlink="#000000" hlink="#000000"/>
-    <color name="tab-unselected" value="#F7F7F7"  link="#000000" vlink="#000000" hlink="#000000"/>
-    <color name="subtab-selected" value="#a5b6c6"  link="#000000" vlink="#000000" hlink="#000000"/>
-    <color name="subtab-unselected" value="#a5b6c6"  link="#000000" vlink="#000000" hlink="#000000"/>
-
-    <color name="heading" value="#a5b6c6"/>
-    <color name="subheading" value="#CFDCED"/>
-        
-    <color name="navstrip" value="#CFDCED" font="#000000" link="#000000" vlink="#000000" hlink="#000000"/>
-    <color name="toolbox" value="#a5b6c6"/>
-    <color name="border" value="#a5b6c6"/>
-        
-    <color name="menu" value="#F7F7F7" link="#000000" vlink="#000000" hlink="#000000"/>    
-    <color name="dialog" value="#F7F7F7"/>
-            
-    <color name="body"    value="#ffffff" link="#0F3660" vlink="#009999" hlink="#000066"/>
-    
-    <color name="table" value="#a5b6c6"/>    
-    <color name="table-cell" value="#ffffff"/>    
-    <color name="highlight" value="#ffff00"/>
-    <color name="fixme" value="#cc6600"/>
-    <color name="note" value="#006699"/>
-    <color name="warning" value="#990000"/>
-    <color name="code" value="#a5b6c6"/>
-        
-    <color name="footer" value="#a5b6c6"/>
--->
-  
-  <!-- Forrest -->
-<!--
-    <color name="header"    value="#294563"/>
-
-    <color name="tab-selected" value="#4a6d8c" link="#0F3660" vlink="#0F3660" hlink="#000066"/>
-    <color name="tab-unselected" value="#b5c7e7" link="#0F3660" vlink="#0F3660" hlink="#000066"/>
-    <color name="subtab-selected" value="#4a6d8c" link="#0F3660" vlink="#0F3660" hlink="#000066"/>
-    <color name="subtab-unselected" value="#4a6d8c" link="#0F3660" vlink="#0F3660" hlink="#000066"/>
-
-    <color name="heading" value="#294563"/>
-    <color name="subheading" value="#4a6d8c"/>
-        
-    <color name="navstrip" value="#cedfef" font="#0F3660" link="#0F3660" vlink="#0F3660" hlink="#000066"/>
-    <color name="toolbox" value="#4a6d8c"/>
-    <color name="border" value="#294563"/>
-    
-    <color name="menu" value="#4a6d8c" font="#cedfef" link="#ffffff" vlink="#ffffff" hlink="#ffcf00"/>    
-    <color name="dialog" value="#4a6d8c"/>
-            
-    <color name="body" value="#ffffff"  link="#0F3660" vlink="#009999" hlink="#000066"/>
-    
-    <color name="table" value="#7099C5"/>    
-    <color name="table-cell" value="#f0f0ff"/>    
-    <color name="highlight" value="#ffff00"/>
-    <color name="fixme" value="#cc6600"/>
-    <color name="note" value="#006699"/>
-    <color name="warning" value="#990000"/>
-    <color name="code" value="#CFDCED"/>
-        
-    <color name="footer" value="#cedfef"/>
--->
-
-  <!-- Collabnet --> 
-<!--
-    <color name="header"    value="#003366"/>
-
-    <color name="tab-selected" value="#dddddd" link="#555555" vlink="#555555" hlink="#555555"/>
-    <color name="tab-unselected" value="#999999" link="#ffffff" vlink="#ffffff" hlink="#ffffff"/>
-    <color name="subtab-selected" value="#cccccc" link="#000000" vlink="#000000" hlink="#000000"/>
-    <color name="subtab-unselected" value="#cccccc" link="#555555" vlink="#555555" hlink="#555555"/>
-
-    <color name="heading" value="#003366"/>
-    <color name="subheading" value="#888888"/>
-    
-    <color name="navstrip" value="#dddddd" font="#555555"/>
-    <color name="toolbox" value="#dddddd" font="#555555"/>
-    <color name="border" value="#999999"/>
-    
-    <color name="menu" value="#ffffff"/>    
-    <color name="dialog" value="#eeeeee"/>
-            
-    <color name="body"      value="#ffffff"/>
-    
-    <color name="table" value="#ccc"/>    
-    <color name="table-cell" value="#ffffff"/>   
-    <color name="highlight" value="#ffff00"/>
-    <color name="fixme" value="#cc6600"/>
-    <color name="note" value="#006699"/>
-    <color name="warning" value="#990000"/>
-    <color name="code" value="#003366"/>
-        
-    <color name="footer" value="#ffffff"/>
--->
- <!-- Lenya using pelt-->
-<!--
-    <color name="header" value="#ffffff"/>
-
-    <color name="tab-selected" value="#4C6C8F" link="#ffffff" vlink="#ffffff" hlink="#ffffff"/>
-    <color name="tab-unselected" value="#E5E4D9" link="#000000" vlink="#000000" hlink="#000000"/>
-    <color name="subtab-selected" value="#000000" link="#000000" vlink="#000000" hlink="#000000"/>
-    <color name="subtab-unselected" value="#E5E4D9" link="#000000" vlink="#000000" hlink="#000000"/>
-
-    <color name="heading" value="#E5E4D9"/>
-    <color name="subheading" value="#000000"/>
-    <color name="published" value="#4C6C8F" font="#FFFFFF"/>
-    <color name="feedback" value="#4C6C8F" font="#FFFFFF" align="center"/>
-    <color name="navstrip" value="#E5E4D9" font="#000000"/>
-
-    <color name="toolbox" value="#CFDCED" font="#000000"/>
-
-    <color name="border" value="#999999"/>
-    <color name="menu" value="#4C6C8F" font="#ffffff" link="#ffffff" vlink="#ffffff" hlink="#ffffff" current="#FFCC33" />    
-    <color name="menuheading" value="#cfdced" font="#000000" />
-    <color name="searchbox" value="#E5E4D9" font="#000000"/>
-    
-    <color name="dialog" value="#CFDCED"/>
-    <color name="body" value="#ffffff" />            
-    
-    <color name="table" value="#ccc"/>    
-    <color name="table-cell" value="#ffffff"/>   
-    <color name="highlight" value="#ffff00"/>
-    <color name="fixme" value="#cc6600"/>
-    <color name="note" value="#006699"/>
-    <color name="warning" value="#990000"/>
-    <color name="code" value="#003366"/>
-        
-    <color name="footer" value="#E5E4D9"/>
--->
-  </colors>
- 
-  <!-- Settings specific to PDF output. -->
-  <pdf>
-    <!-- 
-       Supported page sizes are a0, a1, a2, a3, a4, a5, executive,
-       folio, legal, ledger, letter, quarto, tabloid (default letter).
-       Supported page orientations are portrait, landscape (default
-       portrait).
-       Supported text alignments are left, right, justify (default left).
-    -->
-    <page size="letter" orientation="portrait" text-align="left"/>
-
-    <!--
-       Margins can be specified for top, bottom, inner, and outer
-       edges. If double-sided="false", the inner edge is always left
-       and the outer is always right. If double-sided="true", the
-       inner edge will be left on odd pages, right on even pages,
-       the outer edge vice versa.
-       Specified below are the default settings.
-    -->
-    <margins double-sided="false">
-      <top>1in</top>
-      <bottom>1in</bottom>
-      <inner>1.25in</inner>
-      <outer>1in</outer>
-    </margins>
-
-    <!--
-      Print the URL text next to all links going outside the file
-    -->
-    <show-external-urls>false</show-external-urls>
-
-    <!--
-      Disable the copyright footer on each page of the PDF.
-      A footer is composed for each page. By default, a "credit" with role=pdf
-      will be used, as explained below. Otherwise a copyright statement
-      will be generated. This latter can be disabled.
-    -->
-    <disable-copyright-footer>false</disable-copyright-footer>
-  </pdf>
-
-  <!-- Credits are typically rendered as a set of small clickable
-    images in the page footer.
-    Use box-location="alt" to move the credit to an alternate location
-    (if the skin supports it). 
-  -->
-  <credits>
-    <credit box-location="alt">
-      <name>Built with Apache Forrest</name>
-      <url>http://forrest.apache.org/</url>
-      <image>images/built-with-forrest-button.png</image>
-      <width>88</width>
-      <height>31</height>
-    </credit>
-    <!-- A credit with @role="pdf" will be used to compose a footer
-     for each page in the PDF, using either "name" or "url" or both.
-    -->
-    <!--
-    <credit role="pdf">
-      <name>Built with Apache Forrest</name>
-      <url>http://forrest.apache.org/</url>
-    </credit>
-    -->
-  </credits>
-
-</skinconfig>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee7beda6/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/status.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/status.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/status.xml
deleted file mode 100644
index a820726..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/status.xml
+++ /dev/null
@@ -1,75 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<status>
-
-  <developers>
-    <person name="Joe Bloggs"      email="joe@joescompany.org"      id="JB" />
-    <!-- Add more people here -->
-  </developers>
-
-  <changes>
-    <!-- Add new releases here -->
-    <release version="0.1" date="unreleased">
-      <!-- Some action types have associated images. By default, images are
-      defined for 'add', 'fix', 'remove', 'update' and 'hack'. If you add
-      src/documentation/resources/images/<foo>.jpg images, these will
-      automatically be used for entries of type <foo>. -->
-
-      <action dev="JB" type="add" context="admin">
-        Initial Import
-      </action>
-      <!-- Sample action:
-      <action dev="JB" type="fix" due-to="Joe Contributor"
-          due-to-email="joec@apache.org" fixes-bug="123">
-          Fixed a bug in the Foo class.
-        </action>
-        -->
-    </release>
-  </changes>
-
-  <todo>
-    <actions priority="high">
-      <action context="docs" dev="JB">
-        Customize this template project with your project's details.  This
-        TODO list is generated from 'status.xml'.
-      </action>
-      <action context="docs" dev="JB">
-        Add lots of content.  XML content goes in
-        <code>src/documentation/content/xdocs</code>, or wherever the
-        <code>${project.xdocs-dir}</code> property (set in
-        <code>forrest.properties</code>) points.
-      </action>
-      <action context="feedback" dev="JB">
-        Mail <link
-          href="mailto:forrest-dev@xml.apache.org">forrest-dev@xml.apache.org</link>
-        with feedback.
-      </action>
-    </actions>
-    <!-- Add todo items. @context is an arbitrary string. Eg:
-    <actions priority="high">
-      <action context="code" dev="SN">
-      </action>
-    </actions>
-    <actions priority="medium">
-      <action context="docs" dev="open">
-      </action>
-    </actions>
-    -->
-  </todo>
-
-</status>


[41/50] [abbrv] hadoop git commit: HADOOP-12000. cannot use --java-home in test-patch (aw)

Posted by ji...@apache.org.
HADOOP-12000. cannot use --java-home in test-patch (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12d6c5ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12d6c5ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12d6c5ce

Branch: refs/heads/HDFS-7240
Commit: 12d6c5ce4f78bc0e9464522715920866abe1f727
Parents: 7438966
Author: Allen Wittenauer <aw...@apache.org>
Authored: Tue May 19 15:24:23 2015 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Tue May 19 15:24:23 2015 -0700

----------------------------------------------------------------------
 dev-support/test-patch.sh                       | 2 +-
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
 2 files changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12d6c5ce/dev-support/test-patch.sh
----------------------------------------------------------------------
diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index 57fd657..d5d6c26 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -679,7 +679,7 @@ function parse_args
       --issue-re=*)
         ISSUE_RE=${i#*=}
       ;;
-      --java-home)
+      --java-home=*)
         JAVA_HOME=${i#*=}
       ;;
       --jenkins)
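
The one-character pattern change is the whole fix: parse_args matches options with a bash case statement, and a pattern written as --java-home) only matches a bare --java-home, so an invocation like --java-home=/usr/lib/jvm/java-7 was never recognized and the ${i#*=} assignment never ran. A minimal sketch of the corrected arm, not the full parse_args loop:

  for i in "$@"; do
    case ${i} in
      --java-home=*)
        JAVA_HOME=${i#*=}    # strip everything up to the first '=', keeping the JDK path
      ;;
    esac
  done

This brings the option in line with the neighboring --issue-re=* arm shown in the context lines.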

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12d6c5ce/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 4621f80..3e7cb39 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -719,6 +719,8 @@ Release 2.8.0 - UNRELEASED
     HADOOP-11963. Metrics documentation for FSNamesystem misspells
     PendingDataNodeMessageCount. (Anu Engineer via cnauroth)
 
+    HADOOP-12000. cannot use --java-home in test-patch (aw)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES


[32/50] [abbrv] hadoop git commit: HDFS-8131. Implement a space balanced block placement policy. Contributed by Liu Shaohui.

Posted by ji...@apache.org.
HDFS-8131. Implement a space balanced block placement policy. Contributed by Liu Shaohui.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de30d66b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de30d66b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de30d66b

Branch: refs/heads/HDFS-7240
Commit: de30d66b2673d0344346fb985e786247ca682317
Parents: eb4c9dd
Author: Kihwal Lee <ki...@apache.org>
Authored: Tue May 19 08:04:38 2015 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Tue May 19 08:04:38 2015 -0500

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   6 +-
 .../AvailableSpaceBlockPlacementPolicy.java     |  95 +++++++++++
 .../BlockPlacementPolicyDefault.java            |  11 +-
 .../TestAvailableSpaceBlockPlacementPolicy.java | 167 +++++++++++++++++++
 5 files changed, 279 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de30d66b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e5fcba2..76888a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -345,6 +345,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-7891. A block placement policy with best rack failure tolerance.
     (Walter Su via szetszwo)
 
+    HDFS-8131. Implement a space balanced block placement policy (Liu Shaohui
+    via kihwal)
+
   IMPROVEMENTS
 
     HDFS-3918. EditLogTailer shouldn't log WARN when other node

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de30d66b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index c903e76..9c19f91 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -519,7 +519,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY = "dfs.web.authentication.kerberos.keytab";
   public static final String  DFS_NAMENODE_MAX_OP_SIZE_KEY = "dfs.namenode.max.op.size";
   public static final int     DFS_NAMENODE_MAX_OP_SIZE_DEFAULT = 50 * 1024 * 1024;
-  
+  public static final String  DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY =
+      "dfs.namenode.available-space-block-placement-policy.balanced-space-preference-fraction";
+  public static final float   DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_DEFAULT =
+      0.6f;
+
   public static final String DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY = "dfs.block.local-path-access.user";
   public static final String DFS_DOMAIN_SOCKET_PATH_KEY = "dfs.domain.socket.path";
   public static final String DFS_DOMAIN_SOCKET_PATH_DEFAULT = "";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de30d66b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceBlockPlacementPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceBlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceBlockPlacementPolicy.java
new file mode 100644
index 0000000..74c1c78
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceBlockPlacementPolicy.java
@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_DEFAULT;
+
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.net.NetworkTopology;
+
+/**
+ * Space balanced block placement policy.
+ */
+public class AvailableSpaceBlockPlacementPolicy extends
+    BlockPlacementPolicyDefault {
+  private static final Log LOG = LogFactory
+      .getLog(AvailableSpaceBlockPlacementPolicy.class);
+  private static final Random RAND = new Random();
+  private int balancedPreference =
+      (int) (100 * DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_DEFAULT);
+
+  @Override
+  public void initialize(Configuration conf, FSClusterStats stats,
+      NetworkTopology clusterMap, Host2NodesMap host2datanodeMap) {
+    super.initialize(conf, stats, clusterMap, host2datanodeMap);
+    float balancedPreferencePercent =
+        conf.getFloat(
+          DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY,
+          DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_DEFAULT);
+
+    LOG.info("Available space block placement policy initialized: "
+        + DFSConfigKeys.DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY
+        + " = " + balancedPreferencePercent);
+
+    if (balancedPreferencePercent > 1.0) {
+      LOG.warn("The value of "
+          + DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY
+          + " is greater than 1.0 but should be in the range 0.0 - 1.0");
+    }
+    if (balancedPreferencePercent < 0.5) {
+      LOG.warn("The value of "
+          + DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY
+          + " is less than 0.5 so datanodes with more used percent will"
+          + " receive  more block allocations.");
+    }
+    balancedPreference = (int) (100 * balancedPreferencePercent);
+  }
+
+  @Override
+  protected DatanodeDescriptor chooseDataNode(String scope) {
+    DatanodeDescriptor a = (DatanodeDescriptor) clusterMap.chooseRandom(scope);
+    DatanodeDescriptor b = (DatanodeDescriptor) clusterMap.chooseRandom(scope);
+    int ret = compareDataNode(a, b);
+    if (ret == 0) {
+      return a;
+    } else if (ret < 0) {
+      return (RAND.nextInt(100) < balancedPreference) ? a : b;
+    } else {
+      return (RAND.nextInt(100) < balancedPreference) ? b : a;
+    }
+  }
+
+  /**
+   * Compare the two data nodes.
+   */
+  protected int compareDataNode(final DatanodeDescriptor a,
+      final DatanodeDescriptor b) {
+    if (a.equals(b)
+        || Math.abs(a.getDfsUsedPercent() - b.getDfsUsedPercent()) < 5) {
+      return 0;
+    }
+    return a.getDfsUsedPercent() < b.getDfsUsedPercent() ? -1 : 1;
+  }
+}
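
A minimal sketch of how the new policy might be enabled, assuming the NameNode reads its placement policy from the dfs.block.replicator.classname key (the value behind the DFS_BLOCK_REPLICATOR_CLASSNAME_KEY constant used by the test below); the fraction property is the one added to DFSConfigKeys above:

  <!-- sketch only: classname key assumed from DFS_BLOCK_REPLICATOR_CLASSNAME_KEY -->
  <property>
    <name>dfs.block.replicator.classname</name>
    <value>org.apache.hadoop.hdfs.server.blockmanagement.AvailableSpaceBlockPlacementPolicy</value>
  </property>
  <property>
    <!-- 0.5 behaves like the default random choice; values toward 1.0 favor emptier datanodes -->
    <name>dfs.namenode.available-space-block-placement-policy.balanced-space-preference-fraction</name>
    <value>0.6</value>
  </property>

With the default 0.6, chooseDataNode draws two random nodes from the scope and keeps the one with the lower DFS-used percentage 60% of the time; nodes whose used percentages differ by less than 5 points are treated as equal.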

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de30d66b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 620d2a6..21ad01d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -646,8 +646,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
     boolean badTarget = false;
     DatanodeStorageInfo firstChosen = null;
     while(numOfReplicas > 0 && numOfAvailableNodes > 0) {
-      DatanodeDescriptor chosenNode = 
-          (DatanodeDescriptor)clusterMap.chooseRandom(scope);
+      DatanodeDescriptor chosenNode = chooseDataNode(scope);
       if (excludedNodes.add(chosenNode)) { //was not in the excluded list
         if (LOG.isDebugEnabled()) {
           builder.append("\nNode ").append(NodeBase.getPath(chosenNode)).append(" [");
@@ -709,6 +708,14 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
   }
 
   /**
+   * Choose a datanode from the given <i>scope</i>.
+   * @return the chosen node, if there is any.
+   */
+  protected DatanodeDescriptor chooseDataNode(final String scope) {
+    return (DatanodeDescriptor) clusterMap.chooseRandom(scope);
+  }
+
+  /**
    * If the given storage is a good target, add it to the result list and
    * update the set of excluded nodes.
    * @return -1 if the given is not a good target;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de30d66b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestAvailableSpaceBlockPlacementPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestAvailableSpaceBlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestAvailableSpaceBlockPlacementPolicy.java
new file mode 100644
index 0000000..f1e4e1c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestAvailableSpaceBlockPlacementPolicy.java
@@ -0,0 +1,167 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import java.io.File;
+import java.util.ArrayList;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.test.PathUtils;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestAvailableSpaceBlockPlacementPolicy {
+  private final static int numRacks = 4;
+  private final static int nodesPerRack = 5;
+  private final static int blockSize = 1024;
+  private final static int chooseTimes = 10000;
+  private final static String file = "/tobers/test";
+  private final static int replica = 3;
+
+  private static DatanodeStorageInfo[] storages;
+  private static DatanodeDescriptor[] dataNodes;
+  private static Configuration conf;
+  private static NameNode namenode;
+  private static BlockPlacementPolicy placementPolicy;
+  private static NetworkTopology cluster;
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    conf = new HdfsConfiguration();
+    conf.setFloat(
+      DFSConfigKeys.DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY,
+      0.6f);
+    String[] racks = new String[numRacks];
+    for (int i = 0; i < numRacks; i++) {
+      racks[i] = "/rack" + i;
+    }
+
+    String[] owerRackOfNodes = new String[numRacks * nodesPerRack];
+    for (int i = 0; i < nodesPerRack; i++) {
+      for (int j = 0; j < numRacks; j++) {
+        owerRackOfNodes[i * numRacks + j] = racks[j];
+      }
+    }
+
+    storages = DFSTestUtil.createDatanodeStorageInfos(owerRackOfNodes);
+    dataNodes = DFSTestUtil.toDatanodeDescriptor(storages);
+
+    FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
+    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
+    File baseDir = PathUtils.getTestDir(AvailableSpaceBlockPlacementPolicy.class);
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(baseDir, "name").getPath());
+    conf.set(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
+      AvailableSpaceBlockPlacementPolicy.class.getName());
+
+    DFSTestUtil.formatNameNode(conf);
+    namenode = new NameNode(conf);
+
+    final BlockManager bm = namenode.getNamesystem().getBlockManager();
+    placementPolicy = bm.getBlockPlacementPolicy();
+    cluster = bm.getDatanodeManager().getNetworkTopology();
+    for (int i = 0; i < nodesPerRack * numRacks; i++) {
+      cluster.add(dataNodes[i]);
+    }
+
+    setupDataNodeCapacity();
+  }
+
+  private static void updateHeartbeatWithUsage(DatanodeDescriptor dn,
+      long capacity, long dfsUsed, long remaining, long blockPoolUsed,
+      long dnCacheCapacity, long dnCacheUsed, int xceiverCount,
+      int volFailures) {
+    dn.getStorageInfos()[0].setUtilizationForTesting(
+        capacity, dfsUsed, remaining, blockPoolUsed);
+    dn.updateHeartbeat(
+        BlockManagerTestUtil.getStorageReportsForDatanode(dn),
+        dnCacheCapacity, dnCacheUsed, xceiverCount, volFailures, null);
+  }
+
+  private static void setupDataNodeCapacity() {
+    for (int i = 0; i < nodesPerRack * numRacks; i++) {
+      if ((i % 2) == 0) {
+        // remaining 100%
+        updateHeartbeatWithUsage(dataNodes[i], 2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * blockSize,
+          0L, 2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * blockSize, 0L, 0L, 0L, 0, 0);
+      } else {
+        // remaining 50%
+        updateHeartbeatWithUsage(dataNodes[i], 2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * blockSize,
+          HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * blockSize, HdfsServerConstants.MIN_BLOCKS_FOR_WRITE
+              * blockSize, 0L, 0L, 0L, 0, 0);
+      }
+    }
+  }
+
+  /*
+   * To verify that the BlockPlacementPolicy can be replaced by AvailableSpaceBlockPlacementPolicy via
+   * changing the configuration.
+   */
+  @Test
+  public void testPolicyReplacement() {
+    Assert.assertTrue((placementPolicy instanceof AvailableSpaceBlockPlacementPolicy));
+  }
+
+  /*
+   * Call choose target many times and verify that nodes with more remaining percent will be chosen
+   * with high possibility.
+   */
+  @Test
+  public void testChooseTarget() {
+    int total = 0;
+    int moreRemainingNode = 0;
+    for (int i = 0; i < chooseTimes; i++) {
+      DatanodeStorageInfo[] targets =
+          namenode
+              .getNamesystem()
+              .getBlockManager()
+              .getBlockPlacementPolicy()
+              .chooseTarget(file, replica, null, new ArrayList<DatanodeStorageInfo>(), false, null,
+                blockSize, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
+
+      Assert.assertTrue(targets.length == replica);
+      for (int j = 0; j < replica; j++) {
+        total++;
+        if (targets[j].getDatanodeDescriptor().getRemainingPercent() > 60) {
+          moreRemainingNode++;
+        }
+      }
+    }
+    Assert.assertTrue(total == replica * chooseTimes);
+    double possibility = 1.0 * moreRemainingNode / total;
+    Assert.assertTrue(possibility > 0.52);
+    Assert.assertTrue(possibility < 0.55);
+  }
+
+  @AfterClass
+  public static void teardownCluster() {
+    if (namenode != null) {
+      namenode.stop();
+    }
+  }
+}
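
As context for the test above, the policy is swapped in purely through configuration; no writer-side code changes are needed. Below is a minimal sketch of that setup using the same DFSConfigKeys constants the test uses (the sketch class name and the 0.6 bias value are illustrative; it also assumes AvailableSpaceBlockPlacementPolicy lives in the blockmanagement package, as the unqualified reference above implies):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.blockmanagement.AvailableSpaceBlockPlacementPolicy;

public class AvailableSpacePolicyConfigSketch {
  public static Configuration buildConf() {
    Configuration conf = new HdfsConfiguration();
    // Replace the default block placement policy with the space-aware one.
    conf.set(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
        AvailableSpaceBlockPlacementPolicy.class.getName());
    // Bias toward the node with more remaining space when two candidates are
    // compared; 0.5 means no bias, and the test above expects a mild skew at 0.6.
    conf.setFloat(
        DFSConfigKeys.DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY,
        0.6f);
    return conf;
  }

  public static void main(String[] args) {
    System.out.println(buildConf().get(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY));
  }
}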


[35/50] [abbrv] hadoop git commit: HADOOP-11970. Replace uses of ThreadLocal<Random> with JDK7 ThreadLocalRandom (Sean Busbey via Colin P. McCabe)

Posted by ji...@apache.org.
HADOOP-11970. Replace uses of ThreadLocal<Random> with JDK7 ThreadLocalRandom (Sean Busbey via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/470c87db
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/470c87db
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/470c87db

Branch: refs/heads/HDFS-7240
Commit: 470c87dbc6c24dd3b370f1ad9e7ab1f6dabd2080
Parents: c97f32e
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Tue May 19 10:49:17 2015 -0700
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Tue May 19 10:50:15 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../apache/hadoop/io/retry/RetryPolicies.java   | 14 ++----
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  4 +-
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  5 ++-
 .../java/org/apache/hadoop/hdfs/DFSUtil.java    | 16 +------
 .../server/blockmanagement/BlockManager.java    |  8 ++--
 .../server/blockmanagement/DatanodeManager.java |  3 +-
 .../server/blockmanagement/Host2NodesMap.java   |  4 +-
 .../hdfs/server/datanode/BPServiceActor.java    | 10 ++---
 .../hdfs/server/datanode/DirectoryScanner.java  |  5 ++-
 .../datanode/metrics/DataNodeMetrics.java       |  5 ++-
 .../hadoop/hdfs/server/namenode/NNStorage.java  |  3 +-
 .../hdfs/server/namenode/NamenodeFsck.java      |  4 +-
 .../hadoop/hdfs/TestAppendSnapshotTruncate.java | 22 +++++-----
 .../apache/hadoop/hdfs/TestRollingUpgrade.java  |  6 ++-
 .../blockmanagement/TestReplicationPolicy.java  | 46 ++++++++++++--------
 .../hdfs/server/namenode/TestFileTruncate.java  | 10 ++---
 .../hdfs/server/namenode/ha/TestDNFencing.java  |  4 +-
 .../hdfs/server/namenode/ha/TestHAAppend.java   |  6 +--
 .../hadoop/hdfs/util/TestByteArrayManager.java  | 14 +++---
 .../sharedcache/SharedCacheUploader.java        | 11 +----
 21 files changed, 101 insertions(+), 102 deletions(-)
----------------------------------------------------------------------
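
Before the per-file diffs, a minimal before/after sketch of the pattern this commit removes (the holder field mirrors the ones deleted below; the sketch class itself is illustrative). ThreadLocalRandom.current() returns the same thread-confined generator without the per-class ThreadLocal boilerplate:

import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;

public class ThreadLocalRandomSketch {
  // Old pattern: every class keeps its own thread-confined Random holder.
  private static final ThreadLocal<Random> OLD_RANDOM = new ThreadLocal<Random>() {
    @Override
    protected Random initialValue() {
      return new Random();
    }
  };

  public static void main(String[] args) {
    int before = OLD_RANDOM.get().nextInt(100);            // replaced style
    int after = ThreadLocalRandom.current().nextInt(100);  // JDK 7 replacement
    System.out.println(before + " " + after);
  }
}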


http://git-wip-us.apache.org/repos/asf/hadoop/blob/470c87db/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index baf9a0f..10da9d7 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -595,6 +595,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-11812. Implement listLocatedStatus for ViewFileSystem to speed up
     split calculation (gera)
 
+    HADOOP-11970. Replace uses of ThreadLocal<Random> with JDK7
+    ThreadLocalRandom.  (Sean Busbey via Colin P. McCabe)
+
   BUG FIXES
     HADOOP-11802: DomainSocketWatcher thread terminates sometimes after there
     is an I/O error during requestShortCircuitShm (cmccabe)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/470c87db/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
index 14ded8e..a86f443 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
@@ -28,7 +28,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
-import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
@@ -47,13 +47,6 @@ public class RetryPolicies {
   
   public static final Log LOG = LogFactory.getLog(RetryPolicies.class);
   
-  private static ThreadLocal<Random> RANDOM = new ThreadLocal<Random>() {
-    @Override
-    protected Random initialValue() {
-      return new Random();
-    }
-  };
-  
   /**
    * <p>
    * Try once, and fail by re-throwing the exception.
@@ -321,7 +314,8 @@ public class RetryPolicies {
       }
 
       //calculate sleep time and return.
-      final double ratio = RANDOM.get().nextDouble() + 0.5;//0.5 <= ratio <=1.5
+      // ensure 0.5 <= ratio <=1.5
+      final double ratio = ThreadLocalRandom.current().nextDouble() + 0.5;
       final long sleepTime = Math.round(p.sleepMillis * ratio);
       return new RetryAction(RetryAction.RetryDecision.RETRY, sleepTime);
     }
@@ -610,7 +604,7 @@ public class RetryPolicies {
   private static long calculateExponentialTime(long time, int retries,
       long cap) {
     long baseTime = Math.min(time * (1L << retries), cap);
-    return (long) (baseTime * (RANDOM.get().nextDouble() + 0.5));
+    return (long) (baseTime * (ThreadLocalRandom.current().nextDouble() + 0.5));
   }
 
   private static long calculateExponentialTime(long time, int retries) {
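
For reference, the rewritten lines above compute a jittered exponential backoff: the base delay doubles per retry, is capped, and is then scaled by a random ratio in [0.5, 1.5) so independent clients do not retry in lockstep. A standalone sketch of that arithmetic (the 1000 ms base and 30 s cap in main are illustrative):

import java.util.concurrent.ThreadLocalRandom;

public class ExponentialBackoffSketch {
  // Mirrors calculateExponentialTime above: min(time * 2^retries, cap) times a
  // random ratio in [0.5, 1.5).
  static long sleepMillis(long time, int retries, long cap) {
    long baseTime = Math.min(time * (1L << retries), cap);
    return (long) (baseTime * (ThreadLocalRandom.current().nextDouble() + 0.5));
  }

  public static void main(String[] args) {
    for (int retry = 0; retry < 5; retry++) {
      System.out.println("retry " + retry + ": " + sleepMillis(1000, retry, 30000) + " ms");
    }
  }
}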

http://git-wip-us.apache.org/repos/asf/hadoop/blob/470c87db/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index f32702e..a2b9760 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -48,6 +48,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Random;
 import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -316,7 +317,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     
     this.authority = nameNodeUri == null? "null": nameNodeUri.getAuthority();
     this.clientName = "DFSClient_" + dfsClientConf.getTaskId() + "_" + 
-        DFSUtil.getRandom().nextInt()  + "_" + Thread.currentThread().getId();
+        ThreadLocalRandom.current().nextInt()  + "_" +
+        Thread.currentThread().getId();
     int numResponseToDrop = conf.getInt(
         DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
         DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/470c87db/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 3290223..d1e0b9a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -40,6 +40,7 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorCompletionService;
 import java.util.concurrent.Future;
+import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
@@ -976,7 +977,9 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
           // expanded to 9000ms. 
           final int timeWindow = dfsClient.getConf().getTimeWindow();
           double waitTime = timeWindow * failures +       // grace period for the last round of attempt
-            timeWindow * (failures + 1) * DFSUtil.getRandom().nextDouble(); // expanding time window for each failure
+              // expanding time window for each failure
+              timeWindow * (failures + 1) *
+              ThreadLocalRandom.current().nextDouble();
           DFSClient.LOG.warn("DFS chooseDataNode: got # " + (failures + 1) + " IOException, will wait for " + waitTime + " msec.");
           Thread.sleep((long)waitTime);
         } catch (InterruptedException iex) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/470c87db/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index a925a60..5f501c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -50,8 +50,8 @@ import java.util.Date;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Random;
 import java.util.Set;
+import java.util.concurrent.ThreadLocalRandom;
 
 import javax.net.SocketFactory;
 
@@ -103,12 +103,6 @@ public class DFSUtil {
   public static final Log LOG = LogFactory.getLog(DFSUtil.class.getName());
   
   private DFSUtil() { /* Hidden constructor */ }
-  private static final ThreadLocal<Random> RANDOM = new ThreadLocal<Random>() {
-    @Override
-    protected Random initialValue() {
-      return new Random();
-    }
-  };
   
   private static final ThreadLocal<SecureRandom> SECURE_RANDOM = new ThreadLocal<SecureRandom>() {
     @Override
@@ -117,11 +111,6 @@ public class DFSUtil {
     }
   };
 
-  /** @return a pseudo random number generator. */
-  public static Random getRandom() {
-    return RANDOM.get();
-  }
-  
   /** @return a pseudo secure random number generator. */
   public static SecureRandom getSecureRandom() {
     return SECURE_RANDOM.get();
@@ -130,9 +119,8 @@ public class DFSUtil {
   /** Shuffle the elements in the given array. */
   public static <T> T[] shuffle(final T[] array) {
     if (array != null && array.length > 0) {
-      final Random random = getRandom();
       for (int n = array.length; n > 1; ) {
-        final int randomIndex = random.nextInt(n);
+        final int randomIndex = ThreadLocalRandom.current().nextInt(n);
         n--;
         if (n != randomIndex) {
           final T tmp = array[randomIndex];
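
For reference, the loop above is an in-place Fisher-Yates shuffle. A self-contained version follows; the exchange step at the end is cut off by the hunk above and is filled in here with the standard swap, so treat it as a reconstruction rather than a copy of DFSUtil:

import java.util.Arrays;
import java.util.concurrent.ThreadLocalRandom;

public class ShuffleSketch {
  // Swap the last unshuffled element with a randomly chosen element at or before it.
  static <T> T[] shuffle(T[] array) {
    if (array != null && array.length > 0) {
      for (int n = array.length; n > 1; ) {
        int randomIndex = ThreadLocalRandom.current().nextInt(n);
        n--;
        if (n != randomIndex) {
          T tmp = array[randomIndex];
          array[randomIndex] = array[n];
          array[n] = tmp;
        }
      }
    }
    return array;
  }

  public static void main(String[] args) {
    System.out.println(Arrays.toString(shuffle(new Integer[] {1, 2, 3, 4, 5})));
  }
}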

http://git-wip-us.apache.org/repos/asf/hadoop/blob/470c87db/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 6d5808e..8012f71 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -35,6 +35,7 @@ import java.util.Queue;
 import java.util.Set;
 import java.util.TreeMap;
 import java.util.TreeSet;
+import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -1027,7 +1028,8 @@ public class BlockManager {
       return new BlocksWithLocations(new BlockWithLocations[0]);
     }
     Iterator<BlockInfoContiguous> iter = node.getBlockIterator();
-    int startBlock = DFSUtil.getRandom().nextInt(numBlocks); // starting from a random block
+    // starting from a random block
+    int startBlock = ThreadLocalRandom.current().nextInt(numBlocks);
     // skip blocks
     for(int i=0; i<startBlock; i++) {
       iter.next();
@@ -1669,7 +1671,7 @@ public class BlockManager {
       // switch to a different node randomly
       // this to prevent from deterministically selecting the same node even
       // if the node failed to replicate the block on previous iterations
-      if(DFSUtil.getRandom().nextBoolean())
+      if(ThreadLocalRandom.current().nextBoolean())
         srcNode = node;
     }
     if(numReplicas != null)
@@ -1920,7 +1922,7 @@ public class BlockManager {
           datanodeManager.getBlocksPerPostponedMisreplicatedBlocksRescan();
       long base = getPostponedMisreplicatedBlocksCount() - blocksPerRescan;
       if (base > 0) {
-        startIndex = DFSUtil.getRandom().nextLong() % (base+1);
+        startIndex = ThreadLocalRandom.current().nextLong() % (base+1);
         if (startIndex < 0) {
           startIndex += (base+1);
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/470c87db/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 7769a78..01f7972 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -51,6 +51,7 @@ import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
 import java.util.*;
+import java.util.concurrent.ThreadLocalRandom;
 
 /**
  * Manage datanodes, include decommission and other activities.
@@ -457,7 +458,7 @@ public class DatanodeManager {
         // Try something rack local.
         if (node == null && !rackNodes.isEmpty()) {
           node = (DatanodeDescriptor) (rackNodes
-              .get(DFSUtil.getRandom().nextInt(rackNodes.size())));
+              .get(ThreadLocalRandom.current().nextInt(rackNodes.size())));
         }
       }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/470c87db/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java
index 420c141..289b79a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java
@@ -20,12 +20,12 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.DFSUtil;
 
 /** A map from host names to datanode descriptors. */
 @InterfaceAudience.Private
@@ -161,7 +161,7 @@ class Host2NodesMap {
         return nodes[0];
       }
       // more than one node
-      return nodes[DFSUtil.getRandom().nextInt(nodes.length)];
+      return nodes[ThreadLocalRandom.current().nextInt(nodes.length)];
     } finally {
       hostmapLock.readLock().unlock();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/470c87db/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 5bc505f..63a0bb6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -28,13 +28,13 @@ import java.util.Collection;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.ThreadLocalRandom;
 
 import com.google.common.base.Joiner;
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.client.BlockReportOptions;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -122,7 +122,7 @@ class BPServiceActor implements Runnable {
     this.dn = bpos.getDataNode();
     this.nnAddr = nnAddr;
     this.dnConf = dn.getDnConf();
-    prevBlockReportId = DFSUtil.getRandom().nextLong();
+    prevBlockReportId = ThreadLocalRandom.current().nextLong();
     scheduler = new Scheduler(dnConf.heartBeatInterval, dnConf.blockReportInterval);
   }
 
@@ -409,7 +409,7 @@ class BPServiceActor implements Runnable {
     // not send a 0 value ourselves.
     prevBlockReportId++;
     while (prevBlockReportId == 0) {
-      prevBlockReportId = DFSUtil.getRandom().nextLong();
+      prevBlockReportId = ThreadLocalRandom.current().nextLong();
     }
     return prevBlockReportId;
   }
@@ -1054,7 +1054,7 @@ class BPServiceActor implements Runnable {
       if (delay > 0) { // send BR after random delay
         // Numerical overflow is possible here and is okay.
         nextBlockReportTime =
-            monotonicNow() + DFSUtil.getRandom().nextInt((int) (delay));
+            monotonicNow() + ThreadLocalRandom.current().nextInt((int) (delay));
       } else { // send at next heartbeat
         nextBlockReportTime = monotonicNow();
       }
@@ -1073,7 +1073,7 @@ class BPServiceActor implements Runnable {
       // time before we start the periodic block reports.
       if (resetBlockReportTime) {
         nextBlockReportTime = monotonicNow() +
-            DFSUtil.getRandom().nextInt((int)(blockReportIntervalMs));
+            ThreadLocalRandom.current().nextInt((int)(blockReportIntervalMs));
         resetBlockReportTime = false;
       } else {
         /* say the last block report was at 8:20:14. The current report
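
The scheduling code above staggers the first block report by a random offset inside the configured interval, so DataNodes that start together do not all report in the same heartbeat window. A minimal sketch of that jitter (the 6-hour interval in main is illustrative):

import java.util.concurrent.ThreadLocalRandom;

public class BlockReportJitterSketch {
  // First report time: now plus a random offset within the report interval.
  static long firstBlockReportTime(long nowMs, long blockReportIntervalMs) {
    return nowMs + ThreadLocalRandom.current().nextInt((int) blockReportIntervalMs);
  }

  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    long offset = firstBlockReportTime(now, 6 * 60 * 60 * 1000L) - now;
    System.out.println("first block report in " + offset + " ms");
  }
}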

http://git-wip-us.apache.org/repos/asf/hadoop/blob/470c87db/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index 8453094..3383d0e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -31,6 +31,7 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -41,7 +42,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
@@ -327,7 +327,8 @@ public class DirectoryScanner implements Runnable {
 
   void start() {
     shouldRun = true;
-    long offset = DFSUtil.getRandom().nextInt((int) (scanPeriodMsecs/1000L)) * 1000L; //msec
+    long offset = ThreadLocalRandom.current().nextInt(
+        (int) (scanPeriodMsecs/1000L)) * 1000L; //msec
     long firstScanTime = Time.now() + offset;
     LOG.info("Periodic Directory Tree Verification scan starting at " 
         + firstScanTime + " with interval " + scanPeriodMsecs);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/470c87db/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
index 2e62b3c..a0f25da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
@@ -22,7 +22,6 @@ import static org.apache.hadoop.metrics2.impl.MsInfo.SessionId;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.annotation.Metric;
 import org.apache.hadoop.metrics2.annotation.Metrics;
@@ -33,6 +32,8 @@ import org.apache.hadoop.metrics2.lib.MutableQuantiles;
 import org.apache.hadoop.metrics2.lib.MutableRate;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
 
+import java.util.concurrent.ThreadLocalRandom;
+
 /**
  *
  * This class is for maintaining  the various DataNode statistics
@@ -177,7 +178,7 @@ public class DataNodeMetrics {
     MetricsSystem ms = DefaultMetricsSystem.instance();
     JvmMetrics jm = JvmMetrics.create("DataNode", sessionId, ms);
     String name = "DataNodeActivity-"+ (dnName.isEmpty()
-        ? "UndefinedDataNodeName"+ DFSUtil.getRandom().nextInt() 
+        ? "UndefinedDataNodeName"+ ThreadLocalRandom.current().nextInt()
             : dnName.replace(':', '-'));
 
     // Percentile measurement is off by default, by watching no intervals

http://git-wip-us.apache.org/repos/asf/hadoop/blob/470c87db/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index 26a13bd..14647f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -32,6 +32,7 @@ import java.util.List;
 import java.util.Properties;
 import java.util.UUID;
 import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.ThreadLocalRandom;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -599,7 +600,7 @@ public class NNStorage extends Storage implements Closeable,
   private static int newNamespaceID() {
     int newID = 0;
     while(newID == 0)
-      newID = DFSUtil.getRandom().nextInt(0x7FFFFFFF);  // use 31 bits only
+      newID = ThreadLocalRandom.current().nextInt(0x7FFFFFFF);  // use 31 bits
     return newID;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/470c87db/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 44dba28..0daf367 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -32,6 +32,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.TreeSet;
+import java.util.concurrent.ThreadLocalRandom;
 
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.logging.Log;
@@ -44,7 +45,6 @@ import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.BlockReaderFactory;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.RemotePeerFactory;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.net.Peer;
@@ -933,7 +933,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
     }
     DatanodeInfo chosenNode;
     do {
-      chosenNode = nodes[DFSUtil.getRandom().nextInt(nodes.length)];
+      chosenNode = nodes[ThreadLocalRandom.current().nextInt(nodes.length)];
     } while (deadNodes.contains(chosenNode));
     return chosenNode;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/470c87db/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
index e80e14f..d67ceb0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
@@ -29,6 +29,7 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.Random;
 import java.util.concurrent.Callable;
+import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
@@ -214,8 +215,7 @@ public class TestAppendSnapshotTruncate {
     
     @Override
     public String call() throws Exception {
-      final Random r = DFSUtil.getRandom();
-      final int op = r.nextInt(6);
+      final int op = ThreadLocalRandom.current().nextInt(6);
       if (op <= 1) {
         pauseAllFiles();
         try {
@@ -229,7 +229,8 @@ public class TestAppendSnapshotTruncate {
         if (keys.length == 0) {
           return "NO-OP";
         }
-        final String snapshot = keys[r.nextInt(keys.length)];
+        final String snapshot = keys[ThreadLocalRandom.current()
+            .nextInt(keys.length)];
         final String s = checkSnapshot(snapshot);
         
         if (op == 2) {
@@ -292,13 +293,13 @@ public class TestAppendSnapshotTruncate {
 
     @Override
     public String call() throws IOException {
-      final Random r = DFSUtil.getRandom();
-      final int op = r.nextInt(9);
+      final int op = ThreadLocalRandom.current().nextInt(9);
       if (op == 0) {
         return checkFullFile();
       } else {
-        final int nBlocks = r.nextInt(4) + 1;
-        final int lastBlockSize = r.nextInt(BLOCK_SIZE) + 1;
+        final int nBlocks = ThreadLocalRandom.current().nextInt(4) + 1;
+        final int lastBlockSize = ThreadLocalRandom.current()
+            .nextInt(BLOCK_SIZE) + 1;
         final int nBytes = nBlocks*BLOCK_SIZE + lastBlockSize;
 
         if (op <= 4) {
@@ -316,8 +317,8 @@ public class TestAppendSnapshotTruncate {
           .append(n).append(" bytes to ").append(file.getName());
 
       final byte[] bytes = new byte[n];
-      DFSUtil.getRandom().nextBytes(bytes);
-      
+      ThreadLocalRandom.current().nextBytes(bytes);
+
       { // write to local file
         final FileOutputStream out = new FileOutputStream(localFile, true);
         out.write(bytes, 0, bytes.length);
@@ -446,7 +447,6 @@ public class TestAppendSnapshotTruncate {
         final Thread t = new Thread(null, new Runnable() {
           @Override
           public void run() {
-            final Random r = DFSUtil.getRandom();
             for(State s; !(s = checkErrorState()).isTerminated;) {
               if (s == State.RUNNING) {
                 isCalling.set(true);
@@ -458,7 +458,7 @@ public class TestAppendSnapshotTruncate {
                 }
                 isCalling.set(false);
               }
-              sleep(r.nextInt(100) + 50);
+              sleep(ThreadLocalRandom.current().nextInt(100) + 50);
             }
           }
         }, name);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/470c87db/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
index 8baebd8..c4c890f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs;
 
 import java.io.File;
 import java.io.IOException;
+import java.util.concurrent.ThreadLocalRandom;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -272,7 +273,7 @@ public class TestRollingUpgrade {
 
       final Path file = new Path(foo, "file");
       final byte[] data = new byte[1024];
-      DFSUtil.getRandom().nextBytes(data);
+      ThreadLocalRandom.current().nextBytes(data);
       final FSDataOutputStream out = cluster.getFileSystem().create(file);
       out.write(data, 0, data.length);
       out.close();
@@ -320,7 +321,8 @@ public class TestRollingUpgrade {
     Assert.assertTrue(dfs.exists(bar));
 
     //truncate a file
-    final int newLength = DFSUtil.getRandom().nextInt(data.length - 1) + 1;
+    final int newLength = ThreadLocalRandom.current().nextInt(data.length - 1)
+        + 1;
     dfs.truncate(file, newLength);
     TestFileTruncate.checkBlockRecovery(file, dfs);
     AppendTestUtil.checkFullFile(dfs, file, newLength, data);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/470c87db/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index f117ef7..3226578 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -36,6 +36,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Random;
 import java.util.Set;
+import java.util.concurrent.ThreadLocalRandom;
 
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
@@ -75,7 +76,6 @@ public class TestReplicationPolicy {
     ((Log4JLogger)BlockPlacementPolicy.LOG).getLogger().setLevel(Level.ALL);
   }
 
-  private final Random random = DFSUtil.getRandom();
   private static final int BLOCK_SIZE = 1024;
   private static final int NUM_OF_DATANODES = 6;
   private static NetworkTopology cluster;
@@ -850,15 +850,17 @@ public class TestReplicationPolicy {
           .getNamesystem().getBlockManager().neededReplications;
       for (int i = 0; i < 100; i++) {
         // Adding the blocks directly to normal priority
-        neededReplications.add(new Block(random.nextLong()), 2, 0, 3);
+        neededReplications.add(new Block(ThreadLocalRandom.current()
+            .nextLong()), 2, 0, 3);
       }
       // Lets wait for the replication interval, to start process normal
       // priority blocks
       Thread.sleep(DFS_NAMENODE_REPLICATION_INTERVAL);
       
       // Adding the block directly to high priority list
-      neededReplications.add(new Block(random.nextLong()), 1, 0, 3);
-      
+      neededReplications.add(new Block(ThreadLocalRandom.current().nextLong()),
+          1, 0, 3);
+
       // Lets wait for the replication interval
       Thread.sleep(DFS_NAMENODE_REPLICATION_INTERVAL);
 
@@ -880,19 +882,24 @@ public class TestReplicationPolicy {
 
     for (int i = 0; i < 5; i++) {
       // Adding QUEUE_HIGHEST_PRIORITY block
-      underReplicatedBlocks.add(new Block(random.nextLong()), 1, 0, 3);
+      underReplicatedBlocks.add(new Block(ThreadLocalRandom.current()
+          .nextLong()), 1, 0, 3);
 
       // Adding QUEUE_VERY_UNDER_REPLICATED block
-      underReplicatedBlocks.add(new Block(random.nextLong()), 2, 0, 7);
+      underReplicatedBlocks.add(new Block(ThreadLocalRandom.current()
+          .nextLong()), 2, 0, 7);
 
       // Adding QUEUE_REPLICAS_BADLY_DISTRIBUTED block
-      underReplicatedBlocks.add(new Block(random.nextLong()), 6, 0, 6);
+      underReplicatedBlocks.add(new Block(ThreadLocalRandom.current()
+          .nextLong()), 6, 0, 6);
 
       // Adding QUEUE_UNDER_REPLICATED block
-      underReplicatedBlocks.add(new Block(random.nextLong()), 5, 0, 6);
+      underReplicatedBlocks.add(new Block(ThreadLocalRandom.current()
+          .nextLong()), 5, 0, 6);
 
       // Adding QUEUE_WITH_CORRUPT_BLOCKS block
-      underReplicatedBlocks.add(new Block(random.nextLong()), 0, 0, 3);
+      underReplicatedBlocks.add(new Block(ThreadLocalRandom.current()
+          .nextLong()), 0, 0, 3);
     }
 
     // Choose 6 blocks from UnderReplicatedBlocks. Then it should pick 5 blocks
@@ -908,7 +915,8 @@ public class TestReplicationPolicy {
     assertTheChosenBlocks(chosenBlocks, 0, 4, 5, 1, 0);
 
     // Adding QUEUE_HIGHEST_PRIORITY
-    underReplicatedBlocks.add(new Block(random.nextLong()), 1, 0, 3);
+    underReplicatedBlocks.add(new Block(ThreadLocalRandom.current().nextLong()),
+        1, 0, 3);
 
     // Choose 10 blocks from UnderReplicatedBlocks. Then it should pick 1 block from
     // QUEUE_HIGHEST_PRIORITY, 4 blocks from QUEUE_REPLICAS_BADLY_DISTRIBUTED
@@ -1100,9 +1108,9 @@ public class TestReplicationPolicy {
   public void testUpdateDoesNotCauseSkippedReplication() {
     UnderReplicatedBlocks underReplicatedBlocks = new UnderReplicatedBlocks();
 
-    Block block1 = new Block(random.nextLong());
-    Block block2 = new Block(random.nextLong());
-    Block block3 = new Block(random.nextLong());
+    Block block1 = new Block(ThreadLocalRandom.current().nextLong());
+    Block block2 = new Block(ThreadLocalRandom.current().nextLong());
+    Block block3 = new Block(ThreadLocalRandom.current().nextLong());
 
     // Adding QUEUE_VERY_UNDER_REPLICATED block
     final int block1CurReplicas = 2;
@@ -1149,8 +1157,8 @@ public class TestReplicationPolicy {
     BlockManager bm = new BlockManager(mockNS, new HdfsConfiguration());
     UnderReplicatedBlocks underReplicatedBlocks = bm.neededReplications;
 
-    Block block1 = new Block(random.nextLong());
-    Block block2 = new Block(random.nextLong());
+    Block block1 = new Block(ThreadLocalRandom.current().nextLong());
+    Block block2 = new Block(ThreadLocalRandom.current().nextLong());
 
     // Adding QUEUE_UNDER_REPLICATED block
     underReplicatedBlocks.add(block1, 0, 1, 1);
@@ -1195,8 +1203,8 @@ public class TestReplicationPolicy {
     BlockManager bm = new BlockManager(mockNS, new HdfsConfiguration());
     UnderReplicatedBlocks underReplicatedBlocks = bm.neededReplications;
 
-    Block block1 = new Block(random.nextLong());
-    Block block2 = new Block(random.nextLong());
+    Block block1 = new Block(ThreadLocalRandom.current().nextLong());
+    Block block2 = new Block(ThreadLocalRandom.current().nextLong());
 
     // Adding QUEUE_UNDER_REPLICATED block
     underReplicatedBlocks.add(block1, 0, 1, 1);
@@ -1258,8 +1266,8 @@ public class TestReplicationPolicy {
     BlockManager bm = new BlockManager(mockNS, new HdfsConfiguration());
     UnderReplicatedBlocks underReplicatedBlocks = bm.neededReplications;
 
-    Block block1 = new Block(random.nextLong());
-    Block block2 = new Block(random.nextLong());
+    Block block1 = new Block(ThreadLocalRandom.current().nextLong());
+    Block block2 = new Block(ThreadLocalRandom.current().nextLong());
 
     // Adding QUEUE_UNDER_REPLICATED block
     underReplicatedBlocks.add(block1, 0, 1, 1);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/470c87db/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index 1f65f78..222f22b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -31,6 +31,7 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
+import java.util.concurrent.ThreadLocalRandom;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -46,7 +47,6 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.AppendTestUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -164,11 +164,11 @@ public class TestFileTruncate {
     fs.mkdirs(dir);
     final Path p = new Path(dir, "file");
     final byte[] data = new byte[100 * BLOCK_SIZE];
-    DFSUtil.getRandom().nextBytes(data);
+    ThreadLocalRandom.current().nextBytes(data);
     writeContents(data, data.length, p);
 
     for(int n = data.length; n > 0; ) {
-      final int newLength = DFSUtil.getRandom().nextInt(n);
+      final int newLength = ThreadLocalRandom.current().nextInt(n);
       final boolean isReady = fs.truncate(p, newLength);
       LOG.info("newLength=" + newLength + ", isReady=" + isReady);
       assertEquals("File must be closed for truncating at the block boundary",
@@ -193,7 +193,7 @@ public class TestFileTruncate {
     fs.allowSnapshot(dir);
     final Path p = new Path(dir, "file");
     final byte[] data = new byte[BLOCK_SIZE];
-    DFSUtil.getRandom().nextBytes(data);
+    ThreadLocalRandom.current().nextBytes(data);
     writeContents(data, data.length, p);
     final String snapshot = "s0";
     fs.createSnapshot(dir, snapshot);
@@ -226,7 +226,7 @@ public class TestFileTruncate {
     final Path p = new Path(dir, "file");
     final byte[] data = new byte[2 * BLOCK_SIZE];
 
-    DFSUtil.getRandom().nextBytes(data);
+    ThreadLocalRandom.current().nextBytes(data);
     writeContents(data, data.length, p);
 
     final int newLength = data.length - 1;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/470c87db/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
index 42bf46f..04b7b94 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
@@ -24,6 +24,7 @@ import java.io.PrintWriter;
 import java.util.Collection;
 import java.util.List;
 import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ThreadLocalRandom;
 
 import com.google.common.base.Supplier;
 import com.google.common.collect.Lists;
@@ -37,7 +38,6 @@ import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.AppendTestUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -640,7 +640,7 @@ public class TestDNFencing {
       Collection<DatanodeStorageInfo> chooseFrom = !first.isEmpty() ? first : second;
 
       List<DatanodeStorageInfo> l = Lists.newArrayList(chooseFrom);
-      return l.get(DFSUtil.getRandom().nextInt(l.size()));
+      return l.get(ThreadLocalRandom.current().nextInt(l.size()));
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/470c87db/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAAppend.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAAppend.java
index e0aad02..7b7100b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAAppend.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAAppend.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode.ha;
 import static org.junit.Assert.assertEquals;
 
 import java.io.IOException;
+import java.util.concurrent.ThreadLocalRandom;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -27,7 +28,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.AppendTestUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.namenode.TestFileTruncate;
@@ -73,7 +73,7 @@ public class TestHAAppend {
       Path fileToTruncate = new Path("/FileToTruncate");
       
       final byte[] data = new byte[1 << 16];
-      DFSUtil.getRandom().nextBytes(data);
+      ThreadLocalRandom.current().nextBytes(data);
       final int[] appendPos = AppendTestUtil.randomFilePartition(
           data.length, COUNT);
       final int[] truncatePos = AppendTestUtil.randomFilePartition(
@@ -136,4 +136,4 @@ public class TestHAAppend {
       }
     }
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/470c87db/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java
index 1348033..972e51e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java
@@ -27,6 +27,7 @@ import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
+import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -34,7 +35,6 @@ import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.util.ByteArrayManager.Counter;
 import org.apache.hadoop.hdfs.util.ByteArrayManager.CounterMap;
@@ -72,7 +72,7 @@ public class TestByteArrayManager {
     final long countResetTimePeriodMs = 200L;
     final Counter c = new Counter(countResetTimePeriodMs);
 
-    final int n = DFSUtil.getRandom().nextInt(512) + 512;
+    final int n = ThreadLocalRandom.current().nextInt(512) + 512;
     final List<Future<Integer>> futures = new ArrayList<Future<Integer>>(n);
     
     final ExecutorService pool = Executors.newFixedThreadPool(32);
@@ -334,7 +334,7 @@ public class TestByteArrayManager {
       public void run() {
         LOG.info("randomRecycler start");
         for(int i = 0; shouldRun(); i++) {
-          final int j = DFSUtil.getRandom().nextInt(runners.length);
+          final int j = ThreadLocalRandom.current().nextInt(runners.length);
           try {
             runners[j].recycle();
           } catch (Exception e) {
@@ -440,7 +440,7 @@ public class TestByteArrayManager {
         public byte[] call() throws Exception {
           final int lower = maxArrayLength == ByteArrayManager.MIN_ARRAY_LENGTH?
               0: maxArrayLength >> 1;
-          final int arrayLength = DFSUtil.getRandom().nextInt(
+          final int arrayLength = ThreadLocalRandom.current().nextInt(
               maxArrayLength - lower) + lower + 1;
           final byte[] array = bam.newByteArray(arrayLength);
           try {
@@ -496,7 +496,8 @@ public class TestByteArrayManager {
     @Override
     public void run() {
       for(int i = 0; i < n; i++) {
-        final boolean isAllocate = DFSUtil.getRandom().nextInt(NUM_RUNNERS) < p;
+        final boolean isAllocate = ThreadLocalRandom.current()
+            .nextInt(NUM_RUNNERS) < p;
         if (isAllocate) {
           submitAllocate();
         } else {
@@ -573,7 +574,6 @@ public class TestByteArrayManager {
         + ", nAllocations=" + nAllocations
         + ", maxArrays=" + maxArrays);
     
-    final Random ran = DFSUtil.getRandom();
     final ByteArrayManager[] impls = {
         new ByteArrayManager.NewByteArrayWithoutLimit(),
         new NewByteArrayWithLimit(maxArrays),
@@ -590,7 +590,7 @@ public class TestByteArrayManager {
       for(int j = 0; j < nTrials; j++) {
         final int[] sleepTime = new int[nAllocations];
         for(int k = 0; k < sleepTime.length; k++) {
-          sleepTime[k] = ran.nextInt(100);
+          sleepTime[k] = ThreadLocalRandom.current().nextInt(100);
         }
       
         final long elapsed = performanceTest(arrayLength, maxArrays, nThreads,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/470c87db/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/sharedcache/SharedCacheUploader.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/sharedcache/SharedCacheUploader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/sharedcache/SharedCacheUploader.java
index 050d531..682b272 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/sharedcache/SharedCacheUploader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/sharedcache/SharedCacheUploader.java
@@ -22,8 +22,8 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.lang.reflect.UndeclaredThrowableException;
 import java.net.URISyntaxException;
-import java.util.Random;
 import java.util.concurrent.Callable;
+import java.util.concurrent.ThreadLocalRandom;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -61,13 +61,6 @@ class SharedCacheUploader implements Callable<Boolean> {
       new FsPermission((short)00555);
 
   private static final Log LOG = LogFactory.getLog(SharedCacheUploader.class);
-  private static final ThreadLocal<Random> randomTl =
-      new ThreadLocal<Random>() {
-        @Override
-        protected Random initialValue() {
-          return new Random(System.nanoTime());
-        }
-      };
 
   private final LocalResource resource;
   private final Path localPath;
@@ -267,7 +260,7 @@ class SharedCacheUploader implements Callable<Boolean> {
   }
 
   private String getTemporaryFileName(Path path) {
-    return path.getName() + "-" + randomTl.get().nextLong();
+    return path.getName() + "-" + ThreadLocalRandom.current().nextLong();
   }
 
   @VisibleForTesting


[30/50] [abbrv] hadoop git commit: HADOOP-11103. Clean up RemoteException (Contributed by Sean Busbey)

Posted by ji...@apache.org.
HADOOP-11103. Clean up RemoteException (Contributed by Sean Busbey)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4a2830b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4a2830b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4a2830b

Branch: refs/heads/HDFS-7240
Commit: d4a2830b63f0819979b592f4ea6ea3abd5885b71
Parents: f889a49
Author: Vinayakumar B <vi...@apache.org>
Authored: Tue May 19 14:41:05 2015 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Tue May 19 14:41:05 2015 +0530

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 ++
 .../main/java/org/apache/hadoop/ipc/Client.java |  5 +--
 .../org/apache/hadoop/ipc/RemoteException.java  | 32 +++++++++++++++-----
 .../java/org/apache/hadoop/ipc/TestRPC.java     |  4 +++
 .../hdfs/server/namenode/ha/TestHASafeMode.java |  3 ++
 5 files changed, 35 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4a2830b/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index ee7d1e3..8c7c978 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -588,6 +588,8 @@ Release 2.8.0 - UNRELEASED
 
     HADOOP-1540. Support file exclusion list in distcp. (Rich Haase via jing9)
 
+    HADOOP-11103. Clean up RemoteException (Sean Busbey via vinayakumarb)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4a2830b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 97b715b..f28d8a2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -1139,10 +1139,7 @@ public class Client {
           if (erCode == null) {
              LOG.warn("Detailed error code not set by server on rpc error");
           }
-          RemoteException re = 
-              ( (erCode == null) ? 
-                  new RemoteException(exceptionClassName, errorMsg) :
-              new RemoteException(exceptionClassName, errorMsg, erCode));
+          RemoteException re = new RemoteException(exceptionClassName, errorMsg, erCode);
           if (status == RpcStatusProto.ERROR) {
             calls.remove(callId);
             call.setException(re);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4a2830b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java
index 7926d86..620e100 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java
@@ -25,31 +25,46 @@ import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.Rpc
 import org.xml.sax.Attributes;
 
 public class RemoteException extends IOException {
+  /** This value must not be defined in RpcHeader.proto, so that protobuf returns null for it. */
+  private static final int UNSPECIFIED_ERROR = -1;
   /** For java.io.Serializable */
   private static final long serialVersionUID = 1L;
   private final int errorCode;
 
-  private String className;
+  private final String className;
   
+  /**
+   * @param className wrapped exception, may be null
+   * @param msg may be null
+   */
   public RemoteException(String className, String msg) {
-    super(msg);
-    this.className = className;
-    errorCode = -1;
+    this(className, msg, null);
   }
   
+  /**
+   * @param className wrapped exception, may be null
+   * @param msg may be null
+   * @param erCode may be null
+   */
   public RemoteException(String className, String msg, RpcErrorCodeProto erCode) {
     super(msg);
     this.className = className;
     if (erCode != null)
       errorCode = erCode.getNumber();
     else 
-      errorCode = -1;
+      errorCode = UNSPECIFIED_ERROR;
   }
   
+  /**
+   * @return the class name for the wrapped exception; may be null if none was given.
+   */
   public String getClassName() {
     return className;
   }
   
+  /**
+   * @return may be null if the code was newer than our protobuf definitions or none was given.
+   */
   public RpcErrorCodeProto getErrorCode() {
     return RpcErrorCodeProto.valueOf(errorCode);
   }
@@ -60,7 +75,7 @@ public class RemoteException extends IOException {
    * <p>
    * Unwraps any IOException.
    * 
-   * @param lookupTypes the desired exception class.
+   * @param lookupTypes the desired exception class. may be null.
    * @return IOException, which is either the lookupClass exception or this.
    */
   public IOException unwrapRemoteException(Class<?>... lookupTypes) {
@@ -108,7 +123,10 @@ public class RemoteException extends IOException {
     return ex;
   }
 
-  /** Create RemoteException from attributes */
+  /**
+   * Create RemoteException from attributes
+   * @param attrs may not be null
+   */
   public static RemoteException valueOf(Attributes attrs) {
     return new RemoteException(attrs.getValue("class"),
         attrs.getValue("message")); 
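
The net effect of the RemoteException changes is that both constructors now funnel through the error-code variant, className becomes final, and the nullability of the class name and error code is spelled out. A hedged sketch of how calling code might lean on the preserved error code follows; the helper and its name are invented, but every Hadoop call in it (getErrorCode(), unwrapRemoteException(Class...), RpcErrorCodeProto.FATAL_UNAUTHORIZED) appears in this commit's diffs.

import java.io.IOException;

import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcErrorCodeProto;
import org.apache.hadoop.security.AccessControlException;

public final class RemoteExceptionHandling {
  // Hypothetical helper: surface authorization failures as their original
  // type and hand anything else back unchanged. getErrorCode() may return
  // null when the server sent no code, so compare with == against the enum.
  static IOException translate(RemoteException re) {
    if (re.getErrorCode() == RpcErrorCodeProto.FATAL_UNAUTHORIZED) {
      return re.unwrapRemoteException(AccessControlException.class);
    }
    return re;
  }
}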

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4a2830b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
index f049395..d36a671 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
@@ -64,6 +64,7 @@ import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.ipc.Client.ConnectionId;
+import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcErrorCodeProto;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
@@ -589,6 +590,7 @@ public class TestRPC {
       }
     } catch (RemoteException e) {
       if (expectFailure) {
+        assertEquals("RPC error code should be UNAUTHORIZED", RpcErrorCodeProto.FATAL_UNAUTHORIZED, e.getErrorCode());
         assertTrue(e.unwrapRemoteException() instanceof AuthorizationException);
       } else {
         throw e;
@@ -728,6 +730,7 @@ public class TestRPC {
       proxy.echo("");
     } catch (RemoteException e) {
       LOG.info("LOGGING MESSAGE: " + e.getLocalizedMessage());
+      assertEquals("RPC error code should be UNAUTHORIZED", RpcErrorCodeProto.FATAL_UNAUTHORIZED, e.getErrorCode());
       assertTrue(e.unwrapRemoteException() instanceof AccessControlException);
       succeeded = true;
     } finally {
@@ -757,6 +760,7 @@ public class TestRPC {
       proxy.echo("");
     } catch (RemoteException e) {
       LOG.info("LOGGING MESSAGE: " + e.getLocalizedMessage());
+      assertEquals("RPC error code should be UNAUTHORIZED", RpcErrorCodeProto.FATAL_UNAUTHORIZED, e.getErrorCode());
       assertTrue(e.unwrapRemoteException() instanceof AccessControlException);
       succeeded = true;
     } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4a2830b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
index 86f3e7b..9ded0ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
@@ -62,6 +62,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.StandbyException;
+import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcErrorCodeProto;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.After;
@@ -774,6 +775,8 @@ public class TestHASafeMode {
       fail("StandBy should throw exception for isInSafeMode");
     } catch (IOException e) {
       if (e instanceof RemoteException) {
+        assertEquals("RPC Error code should indicate app failure.", RpcErrorCodeProto.ERROR_APPLICATION,
+            ((RemoteException) e).getErrorCode());
         IOException sbExcpetion = ((RemoteException) e).unwrapRemoteException();
         assertTrue("StandBy nn should not support isInSafeMode",
             sbExcpetion instanceof StandbyException);


[36/50] [abbrv] hadoop git commit: HDFS-8404. Pending block replication can get stuck using older genstamp. Contributed by Nathan Roberts.

Posted by ji...@apache.org.
HDFS-8404. Pending block replication can get stuck using older genstamp. Contributed by Nathan Roberts.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8860e352
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8860e352
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8860e352

Branch: refs/heads/HDFS-7240
Commit: 8860e352c394372e4eb3ebdf82ea899567f34e4e
Parents: 470c87d
Author: Kihwal Lee <ki...@apache.org>
Authored: Tue May 19 13:05:15 2015 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Tue May 19 13:05:15 2015 -0500

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +
 .../server/blockmanagement/BlockManager.java    | 17 ++--
 .../blockmanagement/TestPendingReplication.java | 98 +++++++++++++++++++-
 3 files changed, 108 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8860e352/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 76888a9..5bcaddd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -878,6 +878,9 @@ Release 2.7.1 - UNRELEASED
 
     HDFS-8405. Fix a typo in NamenodeFsck.  (Takanobu Asanuma via szetszwo)
 
+    HDFS-8404. Pending block replication can get stuck using older genstamp
+    (Nathan Roberts via kihwal)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8860e352/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 8012f71..54981fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1690,13 +1690,18 @@ public class BlockManager {
       namesystem.writeLock();
       try {
         for (int i = 0; i < timedOutItems.length; i++) {
+          /*
+           * Use the blockinfo from the blocksmap to be certain we're working
+           * with the most up-to-date block information (e.g. genstamp).
+           */
+          BlockInfoContiguous bi = blocksMap.getStoredBlock(timedOutItems[i]);
+          if (bi == null) {
+            continue;
+          }
           NumberReplicas num = countNodes(timedOutItems[i]);
-          if (isNeededReplication(timedOutItems[i], getReplication(timedOutItems[i]),
-                                 num.liveReplicas())) {
-            neededReplications.add(timedOutItems[i],
-                                   num.liveReplicas(),
-                                   num.decommissionedAndDecommissioning(),
-                                   getReplication(timedOutItems[i]));
+          if (isNeededReplication(bi, getReplication(bi), num.liveReplicas())) {
+            neededReplications.add(bi, num.liveReplicas(),
+                num.decommissionedAndDecommissioning(), getReplication(bi));
           }
         }
       } finally {
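
The fix above is an instance of a general pattern: references parked in a timeout queue can go stale, so on dequeue they are re-resolved against the authoritative map (here blocksMap, which carries the current generation stamp) and dropped if the entry has disappeared. Below is a generic, hedged sketch of that pattern in plain Java; none of these names are Hadoop APIs.

import java.util.Map;
import java.util.Queue;
import java.util.function.Consumer;

public final class RequeueFresh {
  // Placeholder types: the queue holds possibly-stale keys, the map holds the
  // authoritative, up-to-date values, and 'requeue' acts only on fresh values.
  static <K, V> void drainTimedOut(Queue<K> timedOut, Map<K, V> authoritative,
                                   Consumer<V> requeue) {
    K key;
    while ((key = timedOut.poll()) != null) {
      V current = authoritative.get(key);  // latest metadata, e.g. genstamp
      if (current == null) {
        continue;                          // deleted in the meantime: drop it
      }
      requeue.accept(current);             // never act on the stale 'key' copy
    }
  }
}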

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8860e352/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
index c63badc..259404e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 /**
  * This class tests the internals of PendingReplicationBlocks.java,
@@ -52,13 +53,11 @@ public class TestPendingReplication {
   private static final int DFS_REPLICATION_INTERVAL = 1;
   // Number of datanodes in the cluster
   private static final int DATANODE_COUNT = 5;
-
   @Test
   public void testPendingReplication() {
     PendingReplicationBlocks pendingReplications;
     pendingReplications = new PendingReplicationBlocks(TIMEOUT * 1000);
     pendingReplications.start();
-
     //
     // Add 10 blocks to pendingReplications.
     //
@@ -140,8 +139,7 @@ public class TestPendingReplication {
     //
     // Verify that everything has timed out.
     //
-    assertEquals("Size of pendingReplications ",
-                 0, pendingReplications.size());
+    assertEquals("Size of pendingReplications ", 0, pendingReplications.size());
     Block[] timedOut = pendingReplications.getTimedOutBlocks();
     assertTrue(timedOut != null && timedOut.length == 15);
     for (int i = 0; i < timedOut.length; i++) {
@@ -149,6 +147,98 @@ public class TestPendingReplication {
     }
     pendingReplications.stop();
   }
+
+  /* Test that processPendingReplications will use the most recent
+   * blockinfo from the blocksmap by placing a larger genstamp into
+   * the blocksmap.
+   */
+  @Test
+  public void testProcessPendingReplications() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    conf.setLong(
+        DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, TIMEOUT);
+    MiniDFSCluster cluster = null;
+    Block block;
+    BlockInfoContiguous blockInfo;
+    try {
+      cluster =
+          new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_COUNT).build();
+      cluster.waitActive();
+
+      FSNamesystem fsn = cluster.getNamesystem();
+      BlockManager blkManager = fsn.getBlockManager();
+
+      PendingReplicationBlocks pendingReplications =
+          blkManager.pendingReplications;
+      UnderReplicatedBlocks neededReplications = blkManager.neededReplications;
+      BlocksMap blocksMap = blkManager.blocksMap;
+
+      //
+      // Add 1 block to pendingReplications with GenerationStamp = 0.
+      //
+
+      block = new Block(1, 1, 0);
+      blockInfo = new BlockInfoContiguous(block, (short) 3);
+
+      pendingReplications.increment(block,
+          DatanodeStorageInfo.toDatanodeDescriptors(
+              DFSTestUtil.createDatanodeStorageInfos(1)));
+      BlockCollection bc = Mockito.mock(BlockCollection.class);
+      Mockito.doReturn((short) 3).when(bc).getPreferredBlockReplication();
+      // Place into blocksmap with GenerationStamp = 1
+      blockInfo.setGenerationStamp(1);
+      blocksMap.addBlockCollection(blockInfo, bc);
+
+      assertEquals("Size of pendingReplications ", 1,
+          pendingReplications.size());
+
+      // Add a second block to pendingReplications that has no
+      // corresponding entry in blocksmap
+      block = new Block(2, 2, 0);
+      pendingReplications.increment(block,
+          DatanodeStorageInfo.toDatanodeDescriptors(
+              DFSTestUtil.createDatanodeStorageInfos(1)));
+
+      // verify 2 blocks in pendingReplications
+      assertEquals("Size of pendingReplications ", 2,
+          pendingReplications.size());
+
+      //
+      // Wait for everything to timeout.
+      //
+      while (pendingReplications.size() > 0) {
+        try {
+          Thread.sleep(100);
+        } catch (Exception e) {
+        }
+      }
+
+      //
+      // Verify that block moves to neededReplications
+      //
+      while (neededReplications.size() == 0) {
+        try {
+          Thread.sleep(100);
+        } catch (Exception e) {
+        }
+      }
+
+      // Verify that the generation stamp we will try to replicate
+      // is now 1
+      for (Block b: neededReplications) {
+        assertEquals("Generation stamp is 1 ", 1,
+            b.getGenerationStamp());
+      }
+
+      // Verify size of neededReplications is exactly 1.
+      assertEquals("size of neededReplications is 1 ", 1,
+          neededReplications.size());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
   
   /**
    * Test if DatanodeProtocol#blockReceivedAndDeleted can correctly update the


[37/50] [abbrv] hadoop git commit: HADOOP-11973. Ensure ZkDelegationTokenSecretManager namespace znodes get created with ACLs. (Gregory Chanan via asuresh)

Posted by ji...@apache.org.
HADOOP-11973. Ensure ZkDelegationTokenSecretManager namespace znodes get created with ACLs. (Gregory Chanan via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fd3cb533
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fd3cb533
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fd3cb533

Branch: refs/heads/HDFS-7240
Commit: fd3cb533d2495ea220ab2e468835a43a784d7532
Parents: 8ca1dfe
Author: Arun Suresh <as...@apache.org>
Authored: Tue May 19 11:35:57 2015 -0700
Committer: Arun Suresh <as...@apache.org>
Committed: Tue May 19 11:35:57 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../ZKDelegationTokenSecretManager.java         | 12 ++++
 .../TestZKDelegationTokenSecretManager.java     | 60 ++++++++++++++++++++
 3 files changed, 75 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd3cb533/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 10da9d7..e4537a3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -748,6 +748,9 @@ Release 2.7.1 - UNRELEASED
     HADOOP-11966. Variable cygwin is undefined in hadoop-config.sh when executed
     through hadoop-daemon.sh. (cnauroth)
 
+    HADOOP-11973. Ensure ZkDelegationTokenSecretManager namespace znodes get
+    created with ACLs. (Gregory Chanan via asuresh)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd3cb533/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
index 73c3ab8..da0e6ad 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
@@ -47,6 +47,7 @@ import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener;
 import org.apache.curator.framework.recipes.shared.SharedCount;
 import org.apache.curator.framework.recipes.shared.VersionedValue;
 import org.apache.curator.retry.RetryNTimes;
+import org.apache.curator.utils.EnsurePath;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
@@ -298,6 +299,17 @@ public abstract class ZKDelegationTokenSecretManager<TokenIdent extends Abstract
       } catch (Exception e) {
         throw new IOException("Could not start Curator Framework", e);
       }
+    } else {
+      // If namespace parents are implicitly created, they won't have ACLs.
+      // So, let's explicitly create them.
+      CuratorFramework nullNsFw = zkClient.usingNamespace(null);
+      EnsurePath ensureNs =
+        nullNsFw.newNamespaceAwareEnsurePath("/" + zkClient.getNamespace());
+      try {
+        ensureNs.ensure(nullNsFw.getZookeeperClient());
+      } catch (Exception e) {
+        throw new IOException("Could not create namespace", e);
+      }
     }
     listenerThreadPool = Executors.newSingleThreadExecutor();
     try {
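
The new else branch covers the case where the secret manager runs on a caller-supplied Curator client: namespace parents created implicitly on first write would carry no ACLs, so they are now created explicitly through an EnsurePath bound to the un-namespaced view of the same client. For context, the caller-side setup that makes this matter is roughly the following, condensed from the testACLs test added below; the connect string and credentials are placeholders and digestAclProvider is the ACLProvider defined in that test.

CuratorFramework client = CuratorFrameworkFactory.builder()
    .connectString(connectString)                       // e.g. "host:2181"
    .retryPolicy(new ExponentialBackoffRetry(1000, 3))
    .aclProvider(digestAclProvider)                      // supplies the digest ACL
    .authorization("digest", "myuser:mypass".getBytes("UTF-8"))
    .build();
client.start();
ZKDelegationTokenSecretManager.setCurator(client);
// After init(), the working path and, with this fix, its namespace parents
// carry the ACLs returned by digestAclProvider.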

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd3cb533/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
index 6435c0b..185a994 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
@@ -19,9 +19,16 @@
 package org.apache.hadoop.security.token.delegation;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
 
+import org.apache.curator.RetryPolicy;
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.CuratorFrameworkFactory;
+import org.apache.curator.framework.api.ACLProvider;
+import org.apache.curator.retry.ExponentialBackoffRetry;
 import org.apache.curator.test.TestingServer;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
@@ -30,6 +37,10 @@ import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier;
 import org.apache.hadoop.security.token.delegation.web.DelegationTokenManager;
+import org.apache.zookeeper.ZooDefs;
+import org.apache.zookeeper.data.ACL;
+import org.apache.zookeeper.data.Id;
+import org.apache.zookeeper.server.auth.DigestAuthenticationProvider;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -296,6 +307,55 @@ public class TestZKDelegationTokenSecretManager {
     tm1.destroy();
   }
 
+  @Test
+  public void testACLs() throws Exception {
+    DelegationTokenManager tm1;
+    String connectString = zkServer.getConnectString();
+    Configuration conf = getSecretConf(connectString);
+    RetryPolicy retryPolicy = new ExponentialBackoffRetry(1000, 3);
+    String userPass = "myuser:mypass";
+    final ACL digestACL = new ACL(ZooDefs.Perms.ALL, new Id("digest",
+      DigestAuthenticationProvider.generateDigest(userPass)));
+    ACLProvider digestAclProvider = new ACLProvider() {
+      @Override
+      public List<ACL> getAclForPath(String path) { return getDefaultAcl(); }
+
+      @Override
+      public List<ACL> getDefaultAcl() {
+        List<ACL> ret = new ArrayList<ACL>();
+        ret.add(digestACL);
+        return ret;
+      }
+    };
+
+    CuratorFramework curatorFramework =
+      CuratorFrameworkFactory.builder()
+        .connectString(connectString)
+        .retryPolicy(retryPolicy)
+        .aclProvider(digestAclProvider)
+        .authorization("digest", userPass.getBytes("UTF-8"))
+        .build();
+    curatorFramework.start();
+    ZKDelegationTokenSecretManager.setCurator(curatorFramework);
+    tm1 = new DelegationTokenManager(conf, new Text("bla"));
+    tm1.init();
+
+    // check ACL
+    String workingPath = conf.get(ZKDelegationTokenSecretManager.ZK_DTSM_ZNODE_WORKING_PATH);
+    verifyACL(curatorFramework, "/" + workingPath, digestACL);
+
+    tm1.destroy();
+    ZKDelegationTokenSecretManager.setCurator(null);
+    curatorFramework.close();
+  }
+
+  private void verifyACL(CuratorFramework curatorFramework,
+      String path, ACL expectedACL) throws Exception {
+    List<ACL> acls = curatorFramework.getACL().forPath(path);
+    Assert.assertEquals(1, acls.size());
+    Assert.assertEquals(expectedACL, acls.get(0));
+  }
+
   // Since it is possible that there can be a delay for the cancel token message
   // initiated by one node to reach another node.. The second node can ofcourse
   // verify with ZK directly if the token that needs verification has been


[26/50] [abbrv] hadoop git commit: HADOOP-1540. Support file exclusion list in distcp. Contributed by Rich Haase.

Posted by ji...@apache.org.
HADOOP-1540. Support file exclusion list in distcp. Contributed by Rich Haase.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0790275f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0790275f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0790275f

Branch: refs/heads/HDFS-7240
Commit: 0790275f058b0cf41780ad337c9150a1e8ebebc6
Parents: 76afd28
Author: Jing Zhao <ji...@apache.org>
Authored: Mon May 18 13:24:35 2015 -0700
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon May 18 13:24:35 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |   2 +
 .../org/apache/hadoop/tools/CopyFilter.java     |  60 +++++
 .../apache/hadoop/tools/DistCpConstants.java    |   3 +-
 .../apache/hadoop/tools/DistCpOptionSwitch.java |  11 +-
 .../org/apache/hadoop/tools/DistCpOptions.java  |  30 ++-
 .../org/apache/hadoop/tools/OptionsParser.java  | 267 ++++++++++++-------
 .../apache/hadoop/tools/RegexCopyFilter.java    |  98 +++++++
 .../apache/hadoop/tools/SimpleCopyListing.java  |  23 +-
 .../org/apache/hadoop/tools/TrueCopyFilter.java |  33 +++
 .../org/apache/hadoop/tools/package-info.java   |  26 ++
 .../apache/hadoop/tools/TestCopyListing.java    |  34 ---
 .../apache/hadoop/tools/TestIntegration.java    |  49 ----
 .../apache/hadoop/tools/TestOptionsParser.java  |  17 +-
 .../hadoop/tools/TestRegexCopyFilter.java       | 113 ++++++++
 .../apache/hadoop/tools/TestTrueCopyFilter.java |  36 +++
 15 files changed, 613 insertions(+), 189 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0790275f/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 324434b..cf09c5f 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -583,6 +583,8 @@ Release 2.8.0 - UNRELEASED
     HADOOP-11944. add option to test-patch to avoid relocating patch process
     directory (Sean Busbey via aw)
 
+    HADOOP-1540. Support file exclusion list in distcp. (Rich Haase via jing9)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0790275f/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyFilter.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyFilter.java
new file mode 100644
index 0000000..3da364c
--- /dev/null
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyFilter.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.tools;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+
+/**
+ * Base class for excluding files from DistCp.
+ *
+ */
+public abstract class CopyFilter {
+
+  /**
+   * Default initialize method does nothing.
+   */
+  public void initialize() {}
+
+  /**
+   * Predicate to determine if a file can be excluded from copy.
+   *
+   * @param path a Path to be considered for copying
+   * @return boolean, true to copy, false to exclude
+   */
+  public abstract boolean shouldCopy(Path path);
+
+  /**
+   * Public factory method which returns the appropriate implementation of
+   * CopyFilter.
+   *
+   * @param conf DistCp configuration
+   * @return An instance of the appropriate CopyFilter
+   */
+  public static CopyFilter getCopyFilter(Configuration conf) {
+    String filtersFilename = conf.get(DistCpConstants.CONF_LABEL_FILTERS_FILE);
+
+    if (filtersFilename == null) {
+      return new TrueCopyFilter();
+    } else {
+      String filterFilename = conf.get(
+          DistCpConstants.CONF_LABEL_FILTERS_FILE);
+      return new RegexCopyFilter(filterFilename);
+    }
+  }
+}
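
The factory above keys off distcp.filters.file: when the property is unset every path is copied (TrueCopyFilter), otherwise the named file is handed to RegexCopyFilter, which reads one Java regular expression per line and excludes any path that matches. On the command line this is the new -filters switch, roughly "hadoop distcp -filters /tmp/distcp-excludes.txt <src> <dst>" (file name and paths invented for illustration). Programmatically it is the new setter on DistCpOptions; a short, hedged sketch:

import java.util.Arrays;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.tools.DistCpOptions;

public final class FiltersFileExample {
  // Illustrative only: cluster addresses, paths and the filters file are
  // assumptions, not values taken from the commit.
  static DistCpOptions buildOptions() {
    DistCpOptions options = new DistCpOptions(
        Arrays.asList(new Path("hdfs://nn1:8020/data/src")),
        new Path("hdfs://nn2:8020/data/dst"));
    options.setFiltersFile("/tmp/distcp-excludes.txt");
    return options;
  }
}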

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0790275f/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
index 7ecb6ce..21dca62 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
@@ -59,7 +59,8 @@ public class DistCpConstants {
   public static final String CONF_LABEL_APPEND = "distcp.copy.append";
   public static final String CONF_LABEL_DIFF = "distcp.copy.diff";
   public static final String CONF_LABEL_BANDWIDTH_MB = "distcp.map.bandwidth.mb";
-  
+  public static final String CONF_LABEL_FILTERS_FILE =
+      "distcp.filters.file";
   public static final String CONF_LABEL_MAX_CHUNKS_TOLERABLE =
       "distcp.dynamic.max.chunks.tolerable";
   public static final String CONF_LABEL_MAX_CHUNKS_IDEAL =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0790275f/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
index f90319d..ed4a0b2 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
@@ -177,7 +177,16 @@ public enum DistCpOptionSwitch {
    * Specify bandwidth per map in MB
    */
   BANDWIDTH(DistCpConstants.CONF_LABEL_BANDWIDTH_MB,
-      new Option("bandwidth", true, "Specify bandwidth per map in MB"));
+      new Option("bandwidth", true, "Specify bandwidth per map in MB")),
+
+  /**
+   * Path to a file containing a list of patterns; any file whose path
+   * matches one of the patterns is excluded from the copy job.
+   */
+  FILTERS(DistCpConstants.CONF_LABEL_FILTERS_FILE,
+      new Option("filters", true, "The path to a file containing a list of"
+          + " strings for paths to be excluded from the copy."));
+
 
   public static final String PRESERVE_STATUS_DEFAULT = "-prbugpct";
   private final String confLabel;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0790275f/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
index d8f3ff7..302b626 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
@@ -69,7 +69,12 @@ public class DistCpOptions {
 
   private Path targetPath;
 
-  // targetPathExist is a derived field, it's initialized in the 
+  /**
+   * The path to a file containing a list of paths to filter out of the copy.
+   */
+  private String filtersFile;
+
+  // targetPathExist is a derived field, it's initialized in the
   // beginning of distcp.
   private boolean targetPathExists = true;
   
@@ -139,6 +144,7 @@ public class DistCpOptions {
       this.sourcePaths = that.getSourcePaths();
       this.targetPath = that.getTargetPath();
       this.targetPathExists = that.getTargetPathExists();
+      this.filtersFile = that.getFiltersFile();
     }
   }
 
@@ -549,6 +555,23 @@ public class DistCpOptions {
     return this.targetPathExists = targetPathExists;
   }
 
+  /**
+   * File path that contains the list of patterns
+   * for paths to be filtered from the file copy.
+   * @return - Filter  file path.
+   */
+  public final String getFiltersFile() {
+    return filtersFile;
+  }
+
+  /**
+   * Set filtersFile.
+   * @param filtersFilename The path to a list of patterns to exclude from copy.
+   */
+  public final void setFiltersFile(String filtersFilename) {
+    this.filtersFile = filtersFilename;
+  }
+
   public void validate(DistCpOptionSwitch option, boolean value) {
 
     boolean syncFolder = (option == DistCpOptionSwitch.SYNC_FOLDERS ?
@@ -623,6 +646,10 @@ public class DistCpOptions {
         String.valueOf(mapBandwidth));
     DistCpOptionSwitch.addToConf(conf, DistCpOptionSwitch.PRESERVE_STATUS,
         DistCpUtils.packAttributes(preserveStatus));
+    if (filtersFile != null) {
+      DistCpOptionSwitch.addToConf(conf, DistCpOptionSwitch.FILTERS,
+          filtersFile);
+    }
   }
 
   /**
@@ -645,6 +672,7 @@ public class DistCpOptions {
         ", targetPath=" + targetPath +
         ", targetPathExists=" + targetPathExists +
         ", preserveRawXattrs=" + preserveRawXattrs +
+        ", filtersFile='" + filtersFile + '\'' +
         '}';
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0790275f/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java
index 1729479..37add1e 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java
@@ -86,37 +86,7 @@ public class OptionsParser {
         Arrays.toString(args), e);
     }
 
-    DistCpOptions option;
-    Path targetPath;
-    List<Path> sourcePaths = new ArrayList<Path>();
-
-    String leftOverArgs[] = command.getArgs();
-    if (leftOverArgs == null || leftOverArgs.length < 1) {
-      throw new IllegalArgumentException("Target path not specified");
-    }
-
-    //Last Argument is the target path
-    targetPath = new Path(leftOverArgs[leftOverArgs.length -1].trim());
-
-    //Copy any source paths in the arguments to the list
-    for (int index = 0; index < leftOverArgs.length - 1; index++) {
-      sourcePaths.add(new Path(leftOverArgs[index].trim()));
-    }
-
-    /* If command has source file listing, use it else, fall back on source paths in args
-       If both are present, throw exception and bail */
-    if (command.hasOption(DistCpOptionSwitch.SOURCE_FILE_LISTING.getSwitch())) {
-      if (!sourcePaths.isEmpty()) {
-        throw new IllegalArgumentException("Both source file listing and source paths present");
-      }
-      option = new DistCpOptions(new Path(getVal(command, DistCpOptionSwitch.
-              SOURCE_FILE_LISTING.getSwitch())), targetPath);
-    } else {
-      if (sourcePaths.isEmpty()) {
-        throw new IllegalArgumentException("Neither source file listing nor source paths present");
-      }
-      option = new DistCpOptions(sourcePaths, targetPath);
-    }
+    DistCpOptions option = parseSourceAndTargetPaths(command);
 
     //Process all the other option switches and set options appropriately
     if (command.hasOption(DistCpOptionSwitch.IGNORE_FAILURES.getSwitch())) {
@@ -165,68 +135,23 @@ public class OptionsParser {
       option.setBlocking(false);
     }
 
-    if (command.hasOption(DistCpOptionSwitch.BANDWIDTH.getSwitch())) {
-      try {
-        Integer mapBandwidth = Integer.parseInt(
-            getVal(command, DistCpOptionSwitch.BANDWIDTH.getSwitch()).trim());
-        if (mapBandwidth.intValue() <= 0) {
-          throw new IllegalArgumentException("Bandwidth specified is not positive: " +
-              mapBandwidth);
-        }
-        option.setMapBandwidth(mapBandwidth);
-      } catch (NumberFormatException e) {
-        throw new IllegalArgumentException("Bandwidth specified is invalid: " +
-            getVal(command, DistCpOptionSwitch.BANDWIDTH.getSwitch()), e);
-      }
-    }
+    parseBandwidth(command, option);
 
     if (command.hasOption(DistCpOptionSwitch.SSL_CONF.getSwitch())) {
       option.setSslConfigurationFile(command.
           getOptionValue(DistCpOptionSwitch.SSL_CONF.getSwitch()));
     }
 
-    if (command.hasOption(DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch())) {
-      try {
-        Integer numThreads = Integer.parseInt(getVal(command,
-              DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch()).trim());
-        option.setNumListstatusThreads(numThreads);
-      } catch (NumberFormatException e) {
-        throw new IllegalArgumentException(
-            "Number of liststatus threads is invalid: " + getVal(command,
-                DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch()), e);
-      }
-    }
+    parseNumListStatusThreads(command, option);
 
-    if (command.hasOption(DistCpOptionSwitch.MAX_MAPS.getSwitch())) {
-      try {
-        Integer maps = Integer.parseInt(
-            getVal(command, DistCpOptionSwitch.MAX_MAPS.getSwitch()).trim());
-        option.setMaxMaps(maps);
-      } catch (NumberFormatException e) {
-        throw new IllegalArgumentException("Number of maps is invalid: " +
-            getVal(command, DistCpOptionSwitch.MAX_MAPS.getSwitch()), e);
-      }
-    }
+    parseMaxMaps(command, option);
 
     if (command.hasOption(DistCpOptionSwitch.COPY_STRATEGY.getSwitch())) {
       option.setCopyStrategy(
             getVal(command, DistCpOptionSwitch.COPY_STRATEGY.getSwitch()));
     }
 
-    if (command.hasOption(DistCpOptionSwitch.PRESERVE_STATUS.getSwitch())) {
-      String attributes =
-          getVal(command, DistCpOptionSwitch.PRESERVE_STATUS.getSwitch());
-      if (attributes == null || attributes.isEmpty()) {
-        for (FileAttribute attribute : FileAttribute.values()) {
-          option.preserve(attribute);
-        }
-      } else {
-        for (int index = 0; index < attributes.length(); index++) {
-          option.preserve(FileAttribute.
-              getAttribute(attributes.charAt(index)));
-        }
-      }
-    }
+    parsePreserveStatus(command, option);
 
     if (command.hasOption(DistCpOptionSwitch.DIFF.getSwitch())) {
       String[] snapshots = getVals(command, DistCpOptionSwitch.DIFF.getSwitch());
@@ -235,6 +160,47 @@ public class OptionsParser {
       option.setUseDiff(true, snapshots[0], snapshots[1]);
     }
 
+    parseFileLimit(command);
+
+    parseSizeLimit(command);
+
+    if (command.hasOption(DistCpOptionSwitch.FILTERS.getSwitch())) {
+      option.setFiltersFile(getVal(command,
+          DistCpOptionSwitch.FILTERS.getSwitch()));
+    }
+
+    return option;
+  }
+
+  /**
+   * parseSizeLimit is a helper method for parsing the deprecated
+   * argument SIZE_LIMIT.
+   *
+   * @param command command line arguments
+   */
+  private static void parseSizeLimit(CommandLine command) {
+    if (command.hasOption(DistCpOptionSwitch.SIZE_LIMIT.getSwitch())) {
+      String sizeLimitString = getVal(command,
+                              DistCpOptionSwitch.SIZE_LIMIT.getSwitch().trim());
+      try {
+        Long.parseLong(sizeLimitString);
+      }
+      catch (NumberFormatException e) {
+        throw new IllegalArgumentException("Size-limit is invalid: "
+                                            + sizeLimitString, e);
+      }
+      LOG.warn(DistCpOptionSwitch.SIZE_LIMIT.getSwitch() + " is a deprecated" +
+              " option. Ignoring.");
+    }
+  }
+
+  /**
+   * parseFileLimit is a helper method for parsing the deprecated
+   * argument FILE_LIMIT.
+   *
+   * @param command command line arguments
+   */
+  private static void parseFileLimit(CommandLine command) {
     if (command.hasOption(DistCpOptionSwitch.FILE_LIMIT.getSwitch())) {
       String fileLimitString = getVal(command,
                               DistCpOptionSwitch.FILE_LIMIT.getSwitch().trim());
@@ -246,23 +212,144 @@ public class OptionsParser {
                                             + fileLimitString, e);
       }
       LOG.warn(DistCpOptionSwitch.FILE_LIMIT.getSwitch() + " is a deprecated" +
-              " option. Ignoring.");
+          " option. Ignoring.");
     }
+  }
 
-    if (command.hasOption(DistCpOptionSwitch.SIZE_LIMIT.getSwitch())) {
-      String sizeLimitString = getVal(command,
-                              DistCpOptionSwitch.SIZE_LIMIT.getSwitch().trim());
+  /**
+   * parsePreserveStatus is a helper method for parsing PRESERVE_STATUS.
+   *
+   * @param command command line arguments
+   * @param option  parsed distcp options
+   */
+  private static void parsePreserveStatus(CommandLine command,
+                                          DistCpOptions option) {
+    if (command.hasOption(DistCpOptionSwitch.PRESERVE_STATUS.getSwitch())) {
+      String attributes =
+          getVal(command, DistCpOptionSwitch.PRESERVE_STATUS.getSwitch());
+      if (attributes == null || attributes.isEmpty()) {
+        for (FileAttribute attribute : FileAttribute.values()) {
+          option.preserve(attribute);
+        }
+      } else {
+        for (int index = 0; index < attributes.length(); index++) {
+          option.preserve(FileAttribute.
+              getAttribute(attributes.charAt(index)));
+        }
+      }
+    }
+  }
+
+  /**
+   * parseMaxMaps is a helper method for parsing MAX_MAPS.
+   *
+   * @param command command line arguments
+   * @param option  parsed distcp options
+   */
+  private static void parseMaxMaps(CommandLine command,
+                                   DistCpOptions option) {
+    if (command.hasOption(DistCpOptionSwitch.MAX_MAPS.getSwitch())) {
       try {
-        Long.parseLong(sizeLimitString);
+        Integer maps = Integer.parseInt(
+            getVal(command, DistCpOptionSwitch.MAX_MAPS.getSwitch()).trim());
+        option.setMaxMaps(maps);
+      } catch (NumberFormatException e) {
+        throw new IllegalArgumentException("Number of maps is invalid: " +
+            getVal(command, DistCpOptionSwitch.MAX_MAPS.getSwitch()), e);
       }
-      catch (NumberFormatException e) {
-        throw new IllegalArgumentException("Size-limit is invalid: "
-                                            + sizeLimitString, e);
+    }
+  }
+
+  /**
+   * parseNumListStatusThreads is a helper method for parsing
+   * NUM_LISTSTATUS_THREADS.
+   *
+   * @param command command line arguments
+   * @param option  parsed distcp options
+   */
+  private static void parseNumListStatusThreads(CommandLine command,
+                                                DistCpOptions option) {
+    if (command.hasOption(
+        DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch())) {
+      try {
+        Integer numThreads = Integer.parseInt(getVal(command,
+              DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch()).trim());
+        option.setNumListstatusThreads(numThreads);
+      } catch (NumberFormatException e) {
+        throw new IllegalArgumentException(
+            "Number of liststatus threads is invalid: " + getVal(command,
+                DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch()), e);
+      }
+    }
+  }
+
+  /**
+   * parseBandwidth is a helper method for parsing BANDWIDTH.
+   *
+   * @param command command line arguments
+   * @param option  parsed distcp options
+   */
+  private static void parseBandwidth(CommandLine command,
+                                     DistCpOptions option) {
+    if (command.hasOption(DistCpOptionSwitch.BANDWIDTH.getSwitch())) {
+      try {
+        Integer mapBandwidth = Integer.parseInt(
+            getVal(command, DistCpOptionSwitch.BANDWIDTH.getSwitch()).trim());
+        if (mapBandwidth <= 0) {
+          throw new IllegalArgumentException("Bandwidth specified is not " +
+              "positive: " + mapBandwidth);
+        }
+        option.setMapBandwidth(mapBandwidth);
+      } catch (NumberFormatException e) {
+        throw new IllegalArgumentException("Bandwidth specified is invalid: " +
+            getVal(command, DistCpOptionSwitch.BANDWIDTH.getSwitch()), e);
       }
-      LOG.warn(DistCpOptionSwitch.SIZE_LIMIT.getSwitch() + " is a deprecated" +
-              " option. Ignoring.");
     }
+  }
 
+  /**
+   * parseSourceAndTargetPaths is a helper method for parsing the source
+   * and target paths.
+   *
+   * @param command command line arguments
+   * @return        DistCpOptions
+   */
+  private static DistCpOptions parseSourceAndTargetPaths(
+      CommandLine command) {
+    DistCpOptions option;
+    Path targetPath;
+    List<Path> sourcePaths = new ArrayList<Path>();
+
+    String[] leftOverArgs = command.getArgs();
+    if (leftOverArgs == null || leftOverArgs.length < 1) {
+      throw new IllegalArgumentException("Target path not specified");
+    }
+
+    //Last Argument is the target path
+    targetPath = new Path(leftOverArgs[leftOverArgs.length - 1].trim());
+
+    //Copy any source paths in the arguments to the list
+    for (int index = 0; index < leftOverArgs.length - 1; index++) {
+      sourcePaths.add(new Path(leftOverArgs[index].trim()));
+    }
+
+    /* If command has source file listing, use it else, fall back on source
+       paths in args.  If both are present, throw exception and bail */
+    if (command.hasOption(
+        DistCpOptionSwitch.SOURCE_FILE_LISTING.getSwitch())) {
+      if (!sourcePaths.isEmpty()) {
+        throw new IllegalArgumentException("Both source file listing and " +
+            "source paths present");
+      }
+      option = new DistCpOptions(new Path(getVal(command, DistCpOptionSwitch.
+              SOURCE_FILE_LISTING.getSwitch())), targetPath);
+    } else {
+      if (sourcePaths.isEmpty()) {
+        throw new IllegalArgumentException("Neither source file listing nor " +
+            "source paths present");
+      }
+      option = new DistCpOptions(sourcePaths, targetPath);
+    }
     return option;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0790275f/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/RegexCopyFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/RegexCopyFilter.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/RegexCopyFilter.java
new file mode 100644
index 0000000..1c2b324
--- /dev/null
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/RegexCopyFilter.java
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.tools;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.*;
+import org.apache.hadoop.io.IOUtils;
+
+import java.io.*;
+import java.nio.charset.Charset;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Pattern;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * A CopyFilter which compares Java Regex Patterns to each Path to determine
+ * whether a file should be copied.
+ */
+public class RegexCopyFilter extends CopyFilter {
+
+  private static final Log LOG = LogFactory.getLog(RegexCopyFilter.class);
+  private File filtersFile;
+  private List<Pattern> filters;
+
+  /**
+   * Constructor, sets up a File object to read filter patterns from and
+   * the List to store the patterns.
+   */
+  protected RegexCopyFilter(String filtersFilename) {
+    filtersFile = new File(filtersFilename);
+    filters = new ArrayList<>();
+  }
+
+  /**
+   * Loads a list of filter patterns for use in shouldCopy.
+   */
+  @Override
+  public void initialize() {
+    BufferedReader reader = null;
+    try {
+      InputStream is = new FileInputStream(filtersFile);
+      reader = new BufferedReader(new InputStreamReader(is,
+          Charset.forName("UTF-8")));
+      String line;
+      while ((line = reader.readLine()) != null) {
+        Pattern pattern = Pattern.compile(line);
+        filters.add(pattern);
+      }
+    } catch (FileNotFoundException notFound) {
+      LOG.error("Can't find filters file " + filtersFile);
+    } catch (IOException cantRead) {
+      LOG.error("An error occurred while attempting to read from " +
+          filtersFile);
+    } finally {
+      IOUtils.cleanup(LOG, reader);
+    }
+  }
+
+  /**
+   * Sets the list of filters to exclude files from copy.
+   * Simplifies testing of the filters feature.
+   *
+   * @param filtersList a list of Patterns to be excluded
+   */
+  @VisibleForTesting
+  protected final void setFilters(List<Pattern> filtersList) {
+    this.filters = filtersList;
+  }
+
+  @Override
+  public boolean shouldCopy(Path path) {
+    for (Pattern filter : filters) {
+      if (filter.matcher(path.toString()).matches()) {
+        return false;
+      }
+    }
+    return true;
+  }
+}
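
One behavioural detail of shouldCopy above that is easy to trip over: Matcher.matches() must cover the entire string form of the Path, so a bare file-name pattern excludes nothing unless it is anchored with ".*". A standalone, JDK-only illustration (the example path is invented; no Hadoop classes are involved):

import java.util.regex.Pattern;

public class FilterMatchDemo {
  public static void main(String[] args) {
    String path = "hdfs://nn1:8020/data/part-0001.tmp";
    // Unanchored: matches() fails because the pattern must span the whole path.
    System.out.println(Pattern.compile("part-0001\\.tmp").matcher(path).matches()); // false
    // Anchored: the form a filters-file entry usually needs.
    System.out.println(Pattern.compile(".*\\.tmp").matcher(path).matches());        // true
  }
}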

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0790275f/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
index 4ea1dc9..8f50913 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
@@ -58,6 +58,7 @@ public class SimpleCopyListing extends CopyListing {
   private long totalBytesToCopy = 0;
   private int numListstatusThreads = 1;
   private final int maxRetries = 3;
+  private CopyFilter copyFilter;
 
   /**
    * Protected constructor, to initialize configuration.
@@ -71,6 +72,8 @@ public class SimpleCopyListing extends CopyListing {
     numListstatusThreads = getConf().getInt(
         DistCpConstants.CONF_LABEL_LISTSTATUS_THREADS,
         DistCpConstants.DEFAULT_LISTSTATUS_THREADS);
+    copyFilter = CopyFilter.getCopyFilter(getConf());
+    copyFilter.initialize();
   }
 
   @VisibleForTesting
@@ -213,7 +216,7 @@ public class SimpleCopyListing extends CopyListing {
                   preserveXAttrs && sourceStatus.isDirectory(),
                   preserveRawXAttrs && sourceStatus.isDirectory());
             writeToFileListing(fileListWriter, sourceCopyListingStatus,
-                sourcePathRoot, options);
+                sourcePathRoot);
 
             if (sourceStatus.isDirectory()) {
               if (LOG.isDebugEnabled()) {
@@ -264,11 +267,10 @@ public class SimpleCopyListing extends CopyListing {
    * Provide an option to skip copy of a path, Allows for exclusion
    * of files such as {@link org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter#SUCCEEDED_FILE_NAME}
    * @param path - Path being considered for copy while building the file listing
-   * @param options - Input options passed during DistCp invocation
    * @return - True if the path should be considered for copy, false otherwise
    */
-  protected boolean shouldCopy(Path path, DistCpOptions options) {
-    return true;
+  protected boolean shouldCopy(Path path) {
+    return copyFilter.shouldCopy(path);
   }
 
   /** {@inheritDoc} */
@@ -409,7 +411,7 @@ public class SimpleCopyListing extends CopyListing {
                 preserveXAttrs && child.isDirectory(),
                 preserveRawXattrs && child.isDirectory());
             writeToFileListing(fileListWriter, childCopyListingStatus,
-                 sourcePathRoot, options);
+                 sourcePathRoot);
           }
           if (retry < maxRetries) {
             if (child.isDirectory()) {
@@ -443,26 +445,23 @@ public class SimpleCopyListing extends CopyListing {
       }      
       return;
     }
-    writeToFileListing(fileListWriter, fileStatus, sourcePathRoot, options);
+    writeToFileListing(fileListWriter, fileStatus, sourcePathRoot);
   }
 
   private void writeToFileListing(SequenceFile.Writer fileListWriter,
                                   CopyListingFileStatus fileStatus,
-                                  Path sourcePathRoot,
-                                  DistCpOptions options) throws IOException {
+                                  Path sourcePathRoot) throws IOException {
     if (LOG.isDebugEnabled()) {
       LOG.debug("REL PATH: " + DistCpUtils.getRelativePath(sourcePathRoot,
         fileStatus.getPath()) + ", FULL PATH: " + fileStatus.getPath());
     }
 
-    FileStatus status = fileStatus;
-
-    if (!shouldCopy(fileStatus.getPath(), options)) {
+    if (!shouldCopy(fileStatus.getPath())) {
       return;
     }
 
     fileListWriter.append(new Text(DistCpUtils.getRelativePath(sourcePathRoot,
-        fileStatus.getPath())), status);
+        fileStatus.getPath())), fileStatus);
     fileListWriter.sync();
 
     if (!fileStatus.isDirectory()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0790275f/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/TrueCopyFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/TrueCopyFilter.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/TrueCopyFilter.java
new file mode 100644
index 0000000..b58dd9c
--- /dev/null
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/TrueCopyFilter.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.tools;
+
+import org.apache.hadoop.fs.Path;
+
+/**
+ * A CopyFilter which always returns true.
+ *
+ */
+public class TrueCopyFilter extends CopyFilter {
+
+  @Override
+  public boolean shouldCopy(Path path) {
+    return true;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0790275f/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/package-info.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/package-info.java
new file mode 100644
index 0000000..92278ed
--- /dev/null
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/package-info.java
@@ -0,0 +1,26 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/**
+ * DistCp is a tool for replicating data using MapReduce jobs for concurrent
+ * copy operations.
+ *
+ * @version 2
+ */
+package org.apache.hadoop.tools;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0790275f/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestCopyListing.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestCopyListing.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestCopyListing.java
index 8381c1b..896763d 100644
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestCopyListing.java
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestCopyListing.java
@@ -95,40 +95,6 @@ public class TestCopyListing extends SimpleCopyListing {
   }
 
   @Test(timeout=10000)
-  public void testSkipCopy() throws Exception {
-    SimpleCopyListing listing = new SimpleCopyListing(getConf(), CREDENTIALS) {
-      @Override
-      protected boolean shouldCopy(Path path, DistCpOptions options) {
-        return !path.getName().equals(FileOutputCommitter.SUCCEEDED_FILE_NAME);
-      }
-    };
-    FileSystem fs = FileSystem.get(getConf());
-    List<Path> srcPaths = new ArrayList<Path>();
-    srcPaths.add(new Path("/tmp/in4/1"));
-    srcPaths.add(new Path("/tmp/in4/2"));
-    Path target = new Path("/tmp/out4/1");
-    TestDistCpUtils.createFile(fs, "/tmp/in4/1/_SUCCESS");
-    TestDistCpUtils.createFile(fs, "/tmp/in4/1/file");
-    TestDistCpUtils.createFile(fs, "/tmp/in4/2");
-    fs.mkdirs(target);
-    DistCpOptions options = new DistCpOptions(srcPaths, target);
-    Path listingFile = new Path("/tmp/list4");
-    listing.buildListing(listingFile, options);
-    Assert.assertEquals(listing.getNumberOfPaths(), 3);
-    SequenceFile.Reader reader = new SequenceFile.Reader(getConf(),
-        SequenceFile.Reader.file(listingFile));
-    CopyListingFileStatus fileStatus = new CopyListingFileStatus();
-    Text relativePath = new Text();
-    Assert.assertTrue(reader.next(relativePath, fileStatus));
-    Assert.assertEquals(relativePath.toString(), "/1");
-    Assert.assertTrue(reader.next(relativePath, fileStatus));
-    Assert.assertEquals(relativePath.toString(), "/1/file");
-    Assert.assertTrue(reader.next(relativePath, fileStatus));
-    Assert.assertEquals(relativePath.toString(), "/2");
-    Assert.assertFalse(reader.next(relativePath, fileStatus));
-  }
-
-  @Test(timeout=10000)
   public void testMultipleSrcToFile() {
     FileSystem fs = null;
     try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0790275f/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestIntegration.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestIntegration.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestIntegration.java
index 5726342..ee8e7cc 100644
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestIntegration.java
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestIntegration.java
@@ -242,55 +242,6 @@ public class TestIntegration {
   }
 
   @Test(timeout=100000)
-  public void testCustomCopyListing() {
-
-    try {
-      addEntries(listFile, "multifile1/file3", "multifile1/file4", "multifile1/file5");
-      createFiles("multifile1/file3", "multifile1/file4", "multifile1/file5");
-      mkdirs(target.toString());
-
-      Configuration conf = getConf();
-      try {
-        conf.setClass(DistCpConstants.CONF_LABEL_COPY_LISTING_CLASS,
-            CustomCopyListing.class, CopyListing.class);
-        DistCpOptions options = new DistCpOptions(Arrays.
-            asList(new Path(root + "/" + "multifile1")), target);
-        options.setSyncFolder(true);
-        options.setDeleteMissing(false);
-        options.setOverwrite(false);
-        try {
-          new DistCp(conf, options).execute();
-        } catch (Exception e) {
-          LOG.error("Exception encountered ", e);
-          throw new IOException(e);
-        }
-      } finally {
-        conf.unset(DistCpConstants.CONF_LABEL_COPY_LISTING_CLASS);
-      }
-
-      checkResult(target, 2, "file4", "file5");
-    } catch (IOException e) {
-      LOG.error("Exception encountered while testing distcp", e);
-      Assert.fail("distcp failure");
-    } finally {
-      TestDistCpUtils.delete(fs, root);
-    }
-  }
-
-  private static class CustomCopyListing extends SimpleCopyListing {
-
-    public CustomCopyListing(Configuration configuration,
-                             Credentials credentials) {
-      super(configuration, credentials);
-    }
-
-    @Override
-    protected boolean shouldCopy(Path path, DistCpOptions options) {
-      return !path.getName().equals("file3");
-    }
-  }
-
-  @Test(timeout=100000)
   public void testMultiFileTargetMissing() {
     caseMultiFileTargetMissing(false);
     caseMultiFileTargetMissing(true);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0790275f/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java
index 6eddfb2..b9d9ada 100644
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java
@@ -400,7 +400,7 @@ public class TestOptionsParser {
     String val = "DistCpOptions{atomicCommit=false, syncFolder=false, deleteMissing=false, " +
         "ignoreFailures=false, maxMaps=20, sslConfigurationFile='null', copyStrategy='uniformsize', " +
         "sourceFileListing=abc, sourcePaths=null, targetPath=xyz, targetPathExists=true, " +
-        "preserveRawXattrs=false}";
+        "preserveRawXattrs=false, filtersFile='null'}";
     Assert.assertEquals(val, option.toString());
     Assert.assertNotSame(DistCpOptionSwitch.ATOMIC_COMMIT.toString(),
         DistCpOptionSwitch.ATOMIC_COMMIT.name());
@@ -718,4 +718,19 @@ public class TestOptionsParser {
           "Diff is valid only with update and delete options", e);
     }
   }
+
+  @Test
+  public void testExclusionsOption() {
+    DistCpOptions options = OptionsParser.parse(new String[] {
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
+    Assert.assertNull(options.getFiltersFile());
+
+    options = OptionsParser.parse(new String[] {
+        "-filters",
+        "/tmp/filters.txt",
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
+    Assert.assertEquals(options.getFiltersFile(), "/tmp/filters.txt");
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0790275f/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestRegexCopyFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestRegexCopyFilter.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestRegexCopyFilter.java
new file mode 100644
index 0000000..5618a0b
--- /dev/null
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestRegexCopyFilter.java
@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.tools;
+
+import org.apache.hadoop.fs.Path;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Pattern;
+
+public class TestRegexCopyFilter {
+
+  @Test
+  public void testShouldCopyTrue() {
+    List<Pattern> filters = new ArrayList<>();
+    filters.add(Pattern.compile("user"));
+
+    RegexCopyFilter regexCopyFilter = new RegexCopyFilter("fakeFile");
+    regexCopyFilter.setFilters(filters);
+
+    Path shouldCopyPath = new Path("/user/bar");
+    Assert.assertTrue(regexCopyFilter.shouldCopy(shouldCopyPath));
+  }
+
+  @Test
+  public void testShouldCopyFalse() {
+    List<Pattern> filters = new ArrayList<>();
+    filters.add(Pattern.compile(".*test.*"));
+
+    RegexCopyFilter regexCopyFilter = new RegexCopyFilter("fakeFile");
+    regexCopyFilter.setFilters(filters);
+
+    Path shouldNotCopyPath = new Path("/user/testing");
+    Assert.assertFalse(regexCopyFilter.shouldCopy(shouldNotCopyPath));
+  }
+
+  @Test
+  public void testShouldCopyWithMultipleFilters() {
+    List<Pattern> filters = new ArrayList<>();
+    filters.add(Pattern.compile(".*test.*"));
+    filters.add(Pattern.compile("/user/b.*"));
+    filters.add(Pattern.compile(".*_SUCCESS"));
+
+    List<Path> toCopy = getTestPaths();
+
+    int shouldCopyCount = 0;
+
+    RegexCopyFilter regexCopyFilter = new RegexCopyFilter("fakeFile");
+    regexCopyFilter.setFilters(filters);
+
+    for (Path path: toCopy) {
+      if (regexCopyFilter.shouldCopy(path)) {
+        shouldCopyCount++;
+      }
+    }
+
+    Assert.assertEquals(2, shouldCopyCount);
+  }
+
+  @Test
+  public void testShouldExcludeAll() {
+    List<Pattern> filters = new ArrayList<>();
+    filters.add(Pattern.compile(".*test.*"));
+    filters.add(Pattern.compile("/user/b.*"));
+    filters.add(Pattern.compile(".*"));           // exclude everything
+
+    List<Path> toCopy = getTestPaths();
+
+    int shouldCopyCount = 0;
+
+    RegexCopyFilter regexCopyFilter = new RegexCopyFilter("fakeFile");
+    regexCopyFilter.setFilters(filters);
+
+    for (Path path: toCopy) {
+      if (regexCopyFilter.shouldCopy(path)) {
+        shouldCopyCount++;
+      }
+    }
+
+    Assert.assertEquals(0, shouldCopyCount);
+  }
+
+  private List<Path> getTestPaths() {
+    List<Path> toCopy = new ArrayList<>();
+    toCopy.add(new Path("/user/bar"));
+    toCopy.add(new Path("/user/foo/_SUCCESS"));
+    toCopy.add(new Path("/hive/test_data"));
+    toCopy.add(new Path("test"));
+    toCopy.add(new Path("/user/foo/bar"));
+    toCopy.add(new Path("/mapred/.staging_job"));
+    return toCopy;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0790275f/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestTrueCopyFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestTrueCopyFilter.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestTrueCopyFilter.java
new file mode 100644
index 0000000..2ea60a9
--- /dev/null
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestTrueCopyFilter.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.tools;
+
+import org.apache.hadoop.fs.Path;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestTrueCopyFilter {
+
+  @Test
+  public void testShouldCopy() {
+    Assert.assertTrue(new TrueCopyFilter().shouldCopy(new Path("fake")));
+  }
+
+  @Test
+  public void testShouldCopyWithNull() {
+    Assert.assertTrue(new TrueCopyFilter().shouldCopy(new Path("fake")));
+  }
+}
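
The pieces above replace the old protected shouldCopy(Path, DistCpOptions) hook in SimpleCopyListing with a pluggable CopyFilter. A minimal sketch of a custom filter follows; it is not part of the patch, and it assumes that shouldCopy(Path) is the only method a subclass must override, which is what TrueCopyFilter above implements. It reproduces the behaviour of the removed TestCopyListing#testSkipCopy override, i.e. skipping MapReduce _SUCCESS markers. The class and package names are made up for illustration.

// Hypothetical example, not part of Hadoop.
package org.example.distcp;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.tools.CopyFilter;

public class SuccessMarkerCopyFilter extends CopyFilter {
  @Override
  public boolean shouldCopy(Path path) {
    // Copy everything except files literally named "_SUCCESS".
    return !"_SUCCESS".equals(path.getName());
  }
}

For the common case no subclassing should be needed: TestOptionsParser above exercises the new -filters <file> flag, and TestRegexCopyFilter shows the resulting semantics, namely that a path matching any of the configured regular expressions is excluded from the copy.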


[11/50] [abbrv] hadoop git commit: HDFS-8403. Eliminate retries in TestFileCreation#testOverwriteOpenForWrite. Contributed by Arpit Agarwal.

Posted by ji...@apache.org.
HDFS-8403. Eliminate retries in TestFileCreation#testOverwriteOpenForWrite. Contributed by Arpit Agarwal.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac742c76
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac742c76
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac742c76

Branch: refs/heads/HDFS-7240
Commit: ac742c762d5b01af61022827e9f78fd81b69d717
Parents: e5afac5
Author: Haohui Mai <wh...@apache.org>
Authored: Fri May 15 19:12:40 2015 -0700
Committer: Haohui Mai <wh...@apache.org>
Committed: Fri May 15 19:12:40 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +++
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  5 +++-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  9 +++++++
 .../org/apache/hadoop/hdfs/NameNodeProxies.java | 28 +++++++++++++++++++-
 .../apache/hadoop/hdfs/TestFileCreation.java    | 10 +++++--
 5 files changed, 51 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac742c76/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4a33987..0c4d850 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -775,6 +775,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8371. Fix test failure in TestHdfsConfigFields for spanreceiver
     properties. (Ray Chiang via aajisaka)
 
+    HDFS-8403. Eliminate retries in TestFileCreation
+    #testOverwriteOpenForWrite. (Arpit Agarwal via wheat9)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac742c76/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 7908451..f32702e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -341,10 +341,13 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
       this.namenode = rpcNamenode;
       dtService = null;
     } else {
+      boolean noRetries = conf.getBoolean(
+          DFSConfigKeys.DFS_CLIENT_TEST_NO_PROXY_RETRIES,
+          DFSConfigKeys.DFS_CLIENT_TEST_NO_PROXY_RETRIES_DEFAULT);
       Preconditions.checkArgument(nameNodeUri != null,
           "null URI");
       proxyInfo = NameNodeProxies.createProxy(conf, nameNodeUri,
-          ClientProtocol.class, nnFallbackToSimpleAuth);
+          ClientProtocol.class, nnFallbackToSimpleAuth, !noRetries);
       this.dtService = proxyInfo.getDelegationTokenService();
       this.namenode = proxyInfo.getProxy();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac742c76/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 1d0cf4b..c903e76 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
 
 import java.util.concurrent.TimeUnit;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
@@ -993,6 +994,14 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   // For testing NN retry cache, we can set this property with positive value.
   public static final String  DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY = "dfs.client.test.drop.namenode.response.number";
   public static final int     DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT = 0;
+
+  // Create a NN proxy without retries for testing.
+  @VisibleForTesting
+  public static final String  DFS_CLIENT_TEST_NO_PROXY_RETRIES =
+      "dfs.client.test.no.proxy.retries";
+  @VisibleForTesting
+  public static final boolean DFS_CLIENT_TEST_NO_PROXY_RETRIES_DEFAULT = false;
+
   public static final String  DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY =
       "dfs.client.slow.io.warning.threshold.ms";
   public static final long    DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT = 30000;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac742c76/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
index 77262bc..bafc76f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
@@ -161,6 +161,31 @@ public class NameNodeProxies {
   public static <T> ProxyAndInfo<T> createProxy(Configuration conf,
       URI nameNodeUri, Class<T> xface, AtomicBoolean fallbackToSimpleAuth)
       throws IOException {
+    return createProxy(conf, nameNodeUri, xface, fallbackToSimpleAuth, true);
+  }
+
+  /**
+   * Creates the namenode proxy with the passed protocol. This will handle
+   * creation of either HA- or non-HA-enabled proxy objects, depending upon
+   * if the provided URI is a configured logical URI.
+   *
+   * @param conf the configuration containing the required IPC
+   *        properties, client failover configurations, etc.
+   * @param nameNodeUri the URI pointing either to a specific NameNode
+   *        or to a logical nameservice.
+   * @param xface the IPC interface which should be created
+   * @param fallbackToSimpleAuth set to true or false during calls to
+   *   indicate if a secure client falls back to simple auth
+   * @param withRetries certain interfaces have a non-standard retry policy
+   * @return an object containing both the proxy and the associated
+   *         delegation token service it corresponds to
+   * @throws IOException if there is an error creating the proxy
+   **/
+  @SuppressWarnings("unchecked")
+  public static <T> ProxyAndInfo<T> createProxy(Configuration conf,
+      URI nameNodeUri, Class<T> xface, AtomicBoolean fallbackToSimpleAuth,
+      boolean withRetries)
+      throws IOException {
     AbstractNNFailoverProxyProvider<T> failoverProxyProvider =
         createFailoverProxyProvider(conf, nameNodeUri, xface, true,
           fallbackToSimpleAuth);
@@ -168,7 +193,8 @@ public class NameNodeProxies {
     if (failoverProxyProvider == null) {
       // Non-HA case
       return createNonHAProxy(conf, NameNode.getAddress(nameNodeUri), xface,
-          UserGroupInformation.getCurrentUser(), true, fallbackToSimpleAuth);
+          UserGroupInformation.getCurrentUser(), withRetries,
+          fallbackToSimpleAuth);
     } else {
       // HA case
       DfsClientConf config = new DfsClientConf(conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac742c76/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
index f732ace..4977015 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
@@ -65,6 +65,7 @@ import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -73,6 +74,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
@@ -377,6 +379,11 @@ public class TestFileCreation {
     Configuration conf = new HdfsConfiguration();
     SimulatedFSDataset.setFactory(conf);
     conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
+
+    // Force NameNodeProxies' createNNProxyWithClientProtocol to give
+    // up file creation after one failure.
+    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_TEST_NO_PROXY_RETRIES, true);
+
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = cluster.getFileSystem();
 
@@ -405,9 +412,8 @@ public class TestFileCreation {
       } catch (IOException abce) {
         GenericTestUtils.assertExceptionContains("Failed to CREATE_FILE", abce);
       }
-      // NameNodeProxies' createNNProxyWithClientProtocol has 5 retries.
       assertCounter("AlreadyBeingCreatedExceptionNumOps",
-          6L, getMetrics(metricsName));
+          1L, getMetrics(metricsName));
       FSDataOutputStream stm2 = fs2.create(p, true);
       stm2.write(2);
       stm2.close();
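
A minimal sketch (not part of the patch) of how a test would use the new knob; only the configuration step is shown, and MiniDFSCluster setup and teardown are elided. The class name is made up for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class NoProxyRetriesExample {
  /** Build a test configuration whose DFSClient gives up after a single RPC attempt. */
  public static Configuration newTestConf() {
    Configuration conf = new HdfsConfiguration();
    // With this flag DFSClient passes withRetries=false to NameNodeProxies.createProxy,
    // so exceptions such as AlreadyBeingCreatedException surface immediately instead of
    // after the default retry sequence.
    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_TEST_NO_PROXY_RETRIES, true);
    return conf;
  }
}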


[12/50] [abbrv] hadoop git commit: YARN-3526. ApplicationMaster tracking URL is incorrectly redirected on a QJM cluster. Contributed by Weiwei Yang

Posted by ji...@apache.org.
YARN-3526. ApplicationMaster tracking URL is incorrectly redirected on a QJM cluster. Contributed by Weiwei Yang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0ad6440
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0ad6440
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0ad6440

Branch: refs/heads/HDFS-7240
Commit: b0ad644083a0dfae3a39159ac88b6fc09d846371
Parents: ac742c7
Author: Xuan <xg...@apache.org>
Authored: Fri May 15 22:39:49 2015 -0700
Committer: Xuan <xg...@apache.org>
Committed: Fri May 15 22:39:49 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                                   | 3 +++
 .../test/java/org/apache/hadoop/yarn/client/TestRMFailover.java   | 3 +++
 .../hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java | 2 ++
 3 files changed, 8 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0ad6440/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 810152a..82174e7 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -503,6 +503,9 @@ Release 2.7.1 - UNRELEASED
     YARN-3641. NodeManager: stopRecoveryStore() shouldn't be skipped when
     exceptions happen in stopping NM's sub-services. (Junping Du via jlowe)
 
+    YARN-3526. ApplicationMaster tracking URL is incorrectly redirected
+    on a QJM cluster. (Weiwei Yang via xgong)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0ad6440/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
index 4938255..d4fc5c1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
@@ -326,6 +326,9 @@ public class TestRMFailover extends ClientBaseWithFixes {
     header = getHeader("Refresh", rm2Url + "/ws/v1/cluster/apps");
     assertTrue(header.contains("; url=" + rm1Url));
 
+    header = getHeader("Refresh", rm2Url + "/proxy/" + fakeAppId);
+    assertEquals(null, header);
+
     // Due to the limitation of MiniYARNCluster and dispatcher is a singleton,
     // we couldn't add the test case after explicitFailover();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0ad6440/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java
index b1027a8..500f17a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppFilter.java
@@ -30,6 +30,7 @@ import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
 import org.apache.hadoop.http.HtmlQuoting;
+import org.apache.hadoop.yarn.server.webproxy.ProxyUriUtils;
 
 import com.google.common.collect.Sets;
 import com.google.inject.Injector;
@@ -88,6 +89,7 @@ public class RMWebAppFilter extends GuiceContainer {
   private boolean shouldRedirect(RMWebApp rmWebApp, String uri) {
     return !uri.equals("/" + rmWebApp.wsName() + "/v1/cluster/info")
         && !uri.equals("/" + rmWebApp.name() + "/cluster")
+        && !uri.startsWith(ProxyUriUtils.PROXY_BASE)
         && !NON_REDIRECTED_URIS.contains(uri);
   }
 }
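
Illustrative only, not from the patch: the check added to shouldRedirect keeps the redirect filter from rewriting any URI under the web proxy, so an ApplicationMaster tracking URL resolves correctly on an HA cluster. The literal "/proxy/" below is an assumption about the value of ProxyUriUtils.PROXY_BASE, used purely for this demonstration; the class name is made up.

public class ProxyRedirectDemo {
  private static final String PROXY_BASE = "/proxy/"; // assumed value, for illustration

  static boolean exemptFromRedirect(String uri) {
    return uri.startsWith(PROXY_BASE);
  }

  public static void main(String[] args) {
    // AM tracking pages stay with the embedded web proxy on the RM that receives them...
    System.out.println(exemptFromRedirect("/proxy/application_1431759000000_0001/")); // true
    // ...while ordinary web UI and REST URIs still redirect to the active RM.
    System.out.println(exemptFromRedirect("/ws/v1/cluster/apps"));                    // false
  }
}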


[13/50] [abbrv] hadoop git commit: HDFS-8157. Writes to RAM DISK reserve locked memory for block files. (Arpit Agarwal)

Posted by ji...@apache.org.
HDFS-8157. Writes to RAM DISK reserve locked memory for block files. (Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e453989a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e453989a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e453989a

Branch: refs/heads/HDFS-7240
Commit: e453989a5722e653bd97e3e54f9bbdffc9454fba
Parents: b0ad644
Author: Arpit Agarwal <ar...@apache.org>
Authored: Sat May 16 09:05:35 2015 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Sat May 16 09:05:35 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../hdfs/server/datanode/ReplicaInPipeline.java |  11 +-
 .../hdfs/server/datanode/ReplicaInfo.java       |  12 +-
 .../server/datanode/fsdataset/FsVolumeSpi.java  |   8 +
 .../datanode/fsdataset/impl/BlockPoolSlice.java |   2 +-
 .../impl/FsDatasetAsyncDiskService.java         |   7 +-
 .../datanode/fsdataset/impl/FsDatasetCache.java |  85 +++++++-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 106 ++++++----
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |  20 +-
 .../impl/RamDiskReplicaLruTracker.java          |  19 +-
 .../fsdataset/impl/RamDiskReplicaTracker.java   |  12 +-
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  |   2 +-
 .../hdfs/server/balancer/TestBalancer.java      |   9 +-
 .../server/datanode/SimulatedFSDataset.java     |   4 +
 .../server/datanode/TestDirectoryScanner.java   |   9 +
 .../server/datanode/TestFsDatasetCache.java     |   4 +-
 .../datanode/extdataset/ExternalVolumeImpl.java |   4 +
 .../fsdataset/impl/LazyPersistTestCase.java     |  57 ++++--
 .../impl/TestLazyPersistLockedMemory.java       | 201 +++++++++++++++++++
 .../fsdataset/impl/TestWriteToReplica.java      |   4 +-
 20 files changed, 497 insertions(+), 82 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e453989a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0c4d850..8c823ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -560,6 +560,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8394. Move getAdditionalBlock() and related functionalities into a
     separate class. (wheat9)
 
+    HDFS-8157. Writes to RAM DISK reserve locked memory for block files.
+    (Arpit Agarwal)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e453989a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
index cc55f85..0eb143a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
@@ -51,7 +51,8 @@ public class ReplicaInPipeline extends ReplicaInfo
    * the bytes already written to this block.
    */
   private long bytesReserved;
-  
+  private final long originalBytesReserved;
+
   /**
    * Constructor for a zero length replica
    * @param blockId block id
@@ -97,6 +98,7 @@ public class ReplicaInPipeline extends ReplicaInfo
     this.bytesOnDisk = len;
     this.writer = writer;
     this.bytesReserved = bytesToReserve;
+    this.originalBytesReserved = bytesToReserve;
   }
 
   /**
@@ -109,6 +111,7 @@ public class ReplicaInPipeline extends ReplicaInfo
     this.bytesOnDisk = from.getBytesOnDisk();
     this.writer = from.writer;
     this.bytesReserved = from.bytesReserved;
+    this.originalBytesReserved = from.originalBytesReserved;
   }
 
   @Override
@@ -149,8 +152,14 @@ public class ReplicaInPipeline extends ReplicaInfo
   }
   
   @Override
+  public long getOriginalBytesReserved() {
+    return originalBytesReserved;
+  }
+
+  @Override
   public void releaseAllBytesReserved() {  // ReplicaInPipelineInterface
     getVolume().releaseReservedSpace(bytesReserved);
+    getVolume().releaseLockedMemory(bytesReserved);
     bytesReserved = 0;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e453989a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
index 940d3eb..136d8a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
@@ -218,7 +218,17 @@ abstract public class ReplicaInfo extends Block implements Replica {
   public long getBytesReserved() {
     return 0;
   }
-  
+
+  /**
+   * Number of bytes originally reserved for this replica. The actual
+   * reservation is adjusted as data is written to disk.
+   *
+   * @return the number of bytes originally reserved for this replica.
+   */
+  public long getOriginalBytesReserved() {
+    return 0;
+  }
+
    /**
    * Copy specified file into a temporary file. Then rename the
    * temporary file to the original name. This will cause any

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e453989a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
index 2a8f31b..8d1bb2a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
@@ -73,6 +73,14 @@ public interface FsVolumeSpi {
   public void releaseReservedSpace(long bytesToRelease);
 
   /**
+   * Release reserved memory for an RBW block written to transient storage
+   * i.e. RAM.
+   * bytesToRelease will be rounded down to the OS page size since locked
+   * memory reservation must always be a multiple of the page size.
+   */
+  public void releaseLockedMemory(long bytesToRelease);
+
+  /**
    * BlockIterator will return ExtendedBlock entries from a block pool in
    * this volume.  The entries will be returned in sorted order.<p/>
    *

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e453989a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
index a47d564..951c759 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
@@ -475,7 +475,7 @@ class BlockPoolSlice {
     // eventually.
     if (newReplica.getVolume().isTransientStorage()) {
       lazyWriteReplicaMap.addReplica(bpid, blockId,
-          (FsVolumeImpl) newReplica.getVolume());
+          (FsVolumeImpl) newReplica.getVolume(), 0);
     } else {
       lazyWriteReplicaMap.discardReplica(bpid, blockId, false);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e453989a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
index c1d3990..fdc9f83 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
 import java.io.File;
 import java.io.FileDescriptor;
-import java.io.IOException;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
@@ -277,7 +276,8 @@ class FsDatasetAsyncDiskService {
 
     @Override
     public void run() {
-      long dfsBytes = blockFile.length() + metaFile.length();
+      final long blockLength = blockFile.length();
+      final long metaLength = metaFile.length();
       boolean result;
 
       result = (trashDirectory == null) ? deleteFiles() : moveFiles();
@@ -291,7 +291,8 @@ class FsDatasetAsyncDiskService {
         if(block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK){
           datanode.notifyNamenodeDeletedBlock(block, volume.getStorageID());
         }
-        volume.decDfsUsed(block.getBlockPoolId(), dfsBytes);
+        volume.onBlockFileDeletion(block.getBlockPoolId(), blockLength);
+        volume.onMetaFileDeletion(block.getBlockPoolId(), metaLength);
         LOG.info("Deleted " + block.getBlockPoolId() + " "
             + block.getLocalBlock() + " file " + blockFile);
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e453989a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
index e0df0f2..6f524b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
@@ -151,10 +151,15 @@ public class FsDatasetCache {
     /**
      * Round up a number to the operating system page size.
      */
-    public long round(long count) {
-      long newCount = 
-          (count + (osPageSize - 1)) / osPageSize;
-      return newCount * osPageSize;
+    public long roundUp(long count) {
+      return (count + osPageSize - 1) & (~(osPageSize - 1));
+    }
+
+    /**
+     * Round down a number to the operating system page size.
+     */
+    public long roundDown(long count) {
+      return count & (~(osPageSize - 1));
     }
   }
 
@@ -173,7 +178,7 @@ public class FsDatasetCache {
      *                 -1 if we failed.
      */
     long reserve(long count) {
-      count = rounder.round(count);
+      count = rounder.roundUp(count);
       while (true) {
         long cur = usedBytes.get();
         long next = cur + count;
@@ -195,10 +200,23 @@ public class FsDatasetCache {
      * @return         The new number of usedBytes.
      */
     long release(long count) {
-      count = rounder.round(count);
+      count = rounder.roundUp(count);
       return usedBytes.addAndGet(-count);
     }
-    
+
+    /**
+     * Release some bytes that we're using rounded down to the page size.
+     *
+     * @param count    The number of bytes to release.  We will round this
+     *                 down to the page size.
+     *
+     * @return         The new number of usedBytes.
+     */
+    long releaseRoundDown(long count) {
+      count = rounder.roundDown(count);
+      return usedBytes.addAndGet(-count);
+    }
+
     long get() {
       return usedBytes.get();
     }
@@ -341,6 +359,52 @@ public class FsDatasetCache {
   }
 
   /**
+   * Try to reserve more bytes.
+   *
+   * @param count    The number of bytes to add.  We will round this
+   *                 up to the page size.
+   *
+   * @return         The new number of usedBytes if we succeeded;
+   *                 -1 if we failed.
+   */
+  long reserve(long count) {
+    return usedBytesCount.reserve(count);
+  }
+
+  /**
+   * Release some bytes that we're using.
+   *
+   * @param count    The number of bytes to release.  We will round this
+   *                 up to the page size.
+   *
+   * @return         The new number of usedBytes.
+   */
+  long release(long count) {
+    return usedBytesCount.release(count);
+  }
+
+  /**
+   * Release some bytes that we're using rounded down to the page size.
+   *
+   * @param count    The number of bytes to release.  We will round this
+   *                 down to the page size.
+   *
+   * @return         The new number of usedBytes.
+   */
+  long releaseRoundDown(long count) {
+    return usedBytesCount.releaseRoundDown(count);
+  }
+
+  /**
+   * Get the OS page size.
+   *
+   * @return the OS page size.
+   */
+  long getOsPageSize() {
+    return usedBytesCount.rounder.osPageSize;
+  }
+
+  /**
    * Background worker that mmaps, mlocks, and checksums a block
    */
   private class CachingTask implements Runnable {
@@ -363,7 +427,7 @@ public class FsDatasetCache {
       MappableBlock mappableBlock = null;
       ExtendedBlock extBlk = new ExtendedBlock(key.getBlockPoolId(),
           key.getBlockId(), length, genstamp);
-      long newUsedBytes = usedBytesCount.reserve(length);
+      long newUsedBytes = reserve(length);
       boolean reservedBytes = false;
       try {
         if (newUsedBytes < 0) {
@@ -423,7 +487,7 @@ public class FsDatasetCache {
         IOUtils.closeQuietly(metaIn);
         if (!success) {
           if (reservedBytes) {
-            usedBytesCount.release(length);
+            release(length);
           }
           LOG.debug("Caching of {} was aborted.  We are now caching only {} "
                   + "bytes in total.", key, usedBytesCount.get());
@@ -502,8 +566,7 @@ public class FsDatasetCache {
       synchronized (FsDatasetCache.this) {
         mappableBlockMap.remove(key);
       }
-      long newUsedBytes =
-          usedBytesCount.release(value.mappableBlock.getLength());
+      long newUsedBytes = release(value.mappableBlock.getLength());
       numBlocksCached.addAndGet(-1);
       dataset.datanode.getMetrics().incrBlocksUncached(1);
       if (revocationTimeMs != 0) {
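
A standalone sketch of the arithmetic behind the roundUp/roundDown helpers above; the bit trick relies on the operating system page size being a power of two. The class name and the 4 KB page size are chosen only for the demonstration.

public class PageRounderDemo {
  static final long PAGE = 4096; // stand-in for the OS page size

  static long roundUp(long count)   { return (count + PAGE - 1) & (~(PAGE - 1)); }
  static long roundDown(long count) { return count & (~(PAGE - 1)); }

  public static void main(String[] args) {
    System.out.println(roundUp(1));      // 4096 -> even a 1-byte reservation locks a whole page
    System.out.println(roundUp(4096));   // 4096 -> already aligned, unchanged
    System.out.println(roundDown(5000)); // 4096 -> releaseRoundDown never gives back a partial page
    System.out.println(roundDown(4095)); // 0    -> sub-page remainders stay reserved
  }
}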

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e453989a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 8725126..8ebd214 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -319,8 +319,18 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     cacheManager = new FsDatasetCache(this);
 
     // Start the lazy writer once we have built the replica maps.
-    lazyWriter = new Daemon(new LazyWriter(conf));
-    lazyWriter.start();
+    // We need to start the lazy writer even if MaxLockedMemory is set to
+    // zero because we may have un-persisted replicas in memory from before
+    // the process restart. To minimize the chances of data loss we'll
+    // ensure they get written to disk now.
+    if (ramDiskReplicaTracker.numReplicasNotPersisted() > 0 ||
+        datanode.getDnConf().getMaxLockedMemory() > 0) {
+      lazyWriter = new Daemon(new LazyWriter(conf));
+      lazyWriter.start();
+    } else {
+      lazyWriter = null;
+    }
+
     registerMBean(datanode.getDatanodeUuid());
 
     // Add a Metrics2 Source Interface. This is same
@@ -1284,26 +1294,33 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       " and thus cannot be created.");
     }
     // create a new block
-    FsVolumeReference ref;
-    while (true) {
+    FsVolumeReference ref = null;
+
+    // Use ramdisk only if block size is a multiple of OS page size.
+    // This simplifies reservation for partially used replicas
+    // significantly.
+    if (allowLazyPersist &&
+        lazyWriter != null &&
+        b.getNumBytes() % cacheManager.getOsPageSize() == 0 &&
+        (cacheManager.reserve(b.getNumBytes())) > 0) {
       try {
-        if (allowLazyPersist) {
-          // First try to place the block on a transient volume.
-          ref = volumes.getNextTransientVolume(b.getNumBytes());
-          datanode.getMetrics().incrRamDiskBlocksWrite();
-        } else {
-          ref = volumes.getNextVolume(storageType, b.getNumBytes());
-        }
-      } catch (DiskOutOfSpaceException de) {
-        if (allowLazyPersist) {
-          datanode.getMetrics().incrRamDiskBlocksWriteFallback();
-          allowLazyPersist = false;
-          continue;
+        // First try to place the block on a transient volume.
+        ref = volumes.getNextTransientVolume(b.getNumBytes());
+        datanode.getMetrics().incrRamDiskBlocksWrite();
+      } catch(DiskOutOfSpaceException de) {
+        // Ignore the exception since we just fall back to persistent storage.
+        datanode.getMetrics().incrRamDiskBlocksWriteFallback();
+      } finally {
+        if (ref == null) {
+          cacheManager.release(b.getNumBytes());
         }
-        throw de;
       }
-      break;
     }
+
+    if (ref == null) {
+      ref = volumes.getNextVolume(storageType, b.getNumBytes());
+    }
+
     FsVolumeImpl v = (FsVolumeImpl) ref.getVolume();
     // create an rbw file to hold block in the designated volume
     File f;
@@ -1564,7 +1581,11 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       newReplicaInfo = new FinalizedReplica(replicaInfo, v, dest.getParentFile());
 
       if (v.isTransientStorage()) {
-        ramDiskReplicaTracker.addReplica(bpid, replicaInfo.getBlockId(), v);
+        releaseLockedMemory(
+            replicaInfo.getOriginalBytesReserved() - replicaInfo.getNumBytes(),
+            false);
+        ramDiskReplicaTracker.addReplica(
+            bpid, replicaInfo.getBlockId(), v, replicaInfo.getNumBytes());
         datanode.getMetrics().addRamDiskBytesWrite(replicaInfo.getNumBytes());
       }
     }
@@ -1811,9 +1832,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   }
 
   /**
-   * We're informed that a block is no longer valid.  We
-   * could lazily garbage-collect the block, but why bother?
-   * just get rid of it.
+   * We're informed that a block is no longer valid. Delete it.
    */
   @Override // FsDatasetSpi
   public void invalidate(String bpid, Block invalidBlks[]) throws IOException {
@@ -2064,8 +2083,10 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   public void shutdown() {
     fsRunning = false;
 
-    ((LazyWriter) lazyWriter.getRunnable()).stop();
-    lazyWriter.interrupt();
+    if (lazyWriter != null) {
+      ((LazyWriter) lazyWriter.getRunnable()).stop();
+      lazyWriter.interrupt();
+    }
 
     if (mbeanName != null) {
       MBeans.unregister(mbeanName);
@@ -2083,11 +2104,13 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       volumes.shutdown();
     }
 
-    try {
-      lazyWriter.join();
-    } catch (InterruptedException ie) {
-      LOG.warn("FsDatasetImpl.shutdown ignoring InterruptedException " +
-               "from LazyWriter.join");
+    if (lazyWriter != null) {
+      try {
+        lazyWriter.join();
+      } catch (InterruptedException ie) {
+        LOG.warn("FsDatasetImpl.shutdown ignoring InterruptedException " +
+                     "from LazyWriter.join");
+      }
     }
   }
 
@@ -2173,7 +2196,11 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
             diskFile.length(), diskGS, vol, diskFile.getParentFile());
         volumeMap.add(bpid, diskBlockInfo);
         if (vol.isTransientStorage()) {
-          ramDiskReplicaTracker.addReplica(bpid, blockId, (FsVolumeImpl) vol);
+          long lockedBytesReserved =
+              cacheManager.reserve(diskBlockInfo.getNumBytes()) > 0 ?
+                  diskBlockInfo.getNumBytes() : 0;
+          ramDiskReplicaTracker.addReplica(
+              bpid, blockId, (FsVolumeImpl) vol, lockedBytesReserved);
         }
         LOG.warn("Added missing block to memory " + diskBlockInfo);
         return;
@@ -2760,12 +2787,14 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     boolean ramDiskConfigured = ramDiskConfigured();
     // Add thread for DISK volume if RamDisk is configured
     if (ramDiskConfigured &&
+        asyncLazyPersistService != null &&
         !asyncLazyPersistService.queryVolume(v.getCurrentDir())) {
       asyncLazyPersistService.addVolume(v.getCurrentDir());
     }
 
     // Remove thread for DISK volume if RamDisk is not configured
     if (!ramDiskConfigured &&
+        asyncLazyPersistService != null &&
         asyncLazyPersistService.queryVolume(v.getCurrentDir())) {
       asyncLazyPersistService.removeVolume(v.getCurrentDir());
     }
@@ -2790,9 +2819,10 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 
     // Remove the old replicas
     if (blockFile.delete() || !blockFile.exists()) {
-      ((FsVolumeImpl) replicaInfo.getVolume()).decDfsUsed(bpid, blockFileUsed);
+      FsVolumeImpl volume = (FsVolumeImpl) replicaInfo.getVolume();
+      volume.onBlockFileDeletion(bpid, blockFileUsed);
       if (metaFile.delete() || !metaFile.exists()) {
-        ((FsVolumeImpl) replicaInfo.getVolume()).decDfsUsed(bpid, metaFileUsed);
+        volume.onMetaFileDeletion(bpid, metaFileUsed);
       }
     }
 
@@ -2905,8 +2935,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     }
 
     /**
-     * Attempt to evict one or more transient block replicas we have at least
-     * spaceNeeded bytes free.
+     * Attempt to evict one or more transient block replicas until we
+     * have at least spaceNeeded bytes free.
      */
     private void evictBlocks() throws IOException {
       int iterations = 0;
@@ -3056,5 +3086,13 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       s.add(blockId);
     }
   }
+
+  void releaseLockedMemory(long count, boolean roundup) {
+    if (roundup) {
+      cacheManager.release(count);
+    } else {
+      cacheManager.releaseRoundDown(count);
+    }
+  }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e453989a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index bc96a02..49a56bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -274,7 +274,18 @@ public class FsVolumeImpl implements FsVolumeSpi {
     return getBlockPoolSlice(bpid).getTmpDir();
   }
 
-  void decDfsUsed(String bpid, long value) {
+  void onBlockFileDeletion(String bpid, long value) {
+    decDfsUsed(bpid, value);
+    if (isTransientStorage()) {
+      dataset.releaseLockedMemory(value, true);
+    }
+  }
+
+  void onMetaFileDeletion(String bpid, long value) {
+    decDfsUsed(bpid, value);
+  }
+
+  private void decDfsUsed(String bpid, long value) {
     synchronized(dataset) {
       BlockPoolSlice bp = bpSlices.get(bpid);
       if (bp != null) {
@@ -428,6 +439,13 @@ public class FsVolumeImpl implements FsVolumeSpi {
     }
   }
 
+  @Override
+  public void releaseLockedMemory(long bytesToRelease) {
+    if (isTransientStorage()) {
+      dataset.releaseLockedMemory(bytesToRelease, false);
+    }
+  }
+
   private enum SubdirFilter implements FilenameFilter {
     INSTANCE;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e453989a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaLruTracker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaLruTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaLruTracker.java
index c01a6cf..b940736 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaLruTracker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaLruTracker.java
@@ -38,8 +38,10 @@ public class RamDiskReplicaLruTracker extends RamDiskReplicaTracker {
   private class RamDiskReplicaLru extends RamDiskReplica {
     long lastUsedTime;
 
-    private RamDiskReplicaLru(String bpid, long blockId, FsVolumeImpl ramDiskVolume) {
-      super(bpid, blockId, ramDiskVolume);
+    private RamDiskReplicaLru(String bpid, long blockId,
+                              FsVolumeImpl ramDiskVolume,
+                              long lockedBytesReserved) {
+      super(bpid, blockId, ramDiskVolume, lockedBytesReserved);
     }
 
     @Override
@@ -70,20 +72,23 @@ public class RamDiskReplicaLruTracker extends RamDiskReplicaTracker {
   TreeMultimap<Long, RamDiskReplicaLru> replicasPersisted;
 
   RamDiskReplicaLruTracker() {
-    replicaMaps = new HashMap<String, Map<Long, RamDiskReplicaLru>>();
-    replicasNotPersisted = new LinkedList<RamDiskReplicaLru>();
+    replicaMaps = new HashMap<>();
+    replicasNotPersisted = new LinkedList<>();
     replicasPersisted = TreeMultimap.create();
   }
 
   @Override
   synchronized void addReplica(final String bpid, final long blockId,
-                               final FsVolumeImpl transientVolume) {
+                               final FsVolumeImpl transientVolume,
+                               long lockedBytesReserved) {
     Map<Long, RamDiskReplicaLru> map = replicaMaps.get(bpid);
     if (map == null) {
-      map = new HashMap<Long, RamDiskReplicaLru>();
+      map = new HashMap<>();
       replicaMaps.put(bpid, map);
     }
-    RamDiskReplicaLru ramDiskReplicaLru = new RamDiskReplicaLru(bpid, blockId, transientVolume);
+    RamDiskReplicaLru ramDiskReplicaLru =
+        new RamDiskReplicaLru(bpid, blockId, transientVolume,
+                              lockedBytesReserved);
     map.put(blockId, ramDiskReplicaLru);
     replicasNotPersisted.add(ramDiskReplicaLru);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e453989a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaTracker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaTracker.java
index 7507925..335ed70 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaTracker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaTracker.java
@@ -45,6 +45,7 @@ public abstract class RamDiskReplicaTracker {
     private final long blockId;
     private File savedBlockFile;
     private File savedMetaFile;
+    private long lockedBytesReserved;
 
     private long creationTime;
     protected AtomicLong numReads = new AtomicLong(0);
@@ -61,10 +62,12 @@ public abstract class RamDiskReplicaTracker {
     FsVolumeImpl lazyPersistVolume;
 
     RamDiskReplica(final String bpid, final long blockId,
-                   final FsVolumeImpl ramDiskVolume) {
+                   final FsVolumeImpl ramDiskVolume,
+                   long lockedBytesReserved) {
       this.bpid = bpid;
       this.blockId = blockId;
       this.ramDiskVolume = ramDiskVolume;
+      this.lockedBytesReserved = lockedBytesReserved;
       lazyPersistVolume = null;
       savedMetaFile = null;
       savedBlockFile = null;
@@ -168,6 +171,10 @@ public abstract class RamDiskReplicaTracker {
     public String toString() {
       return "[BlockPoolID=" + bpid + "; BlockId=" + blockId + "]";
     }
+
+    public long getLockedBytesReserved() {
+      return lockedBytesReserved;
+    }
   }
 
   /**
@@ -201,7 +208,8 @@ public abstract class RamDiskReplicaTracker {
    * @param transientVolume RAM disk volume that stores the replica.
    */
   abstract void addReplica(final String bpid, final long blockId,
-                           final FsVolumeImpl transientVolume);
+                           final FsVolumeImpl transientVolume,
+                           long lockedBytesReserved);
 
   /**
    * Invoked when a replica is opened by a client. This may be used as

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e453989a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 12ad23e..fdbacdc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -1582,7 +1582,7 @@ public class MiniDFSCluster {
       throw new IllegalStateException("Attempting to finalize "
                                       + "Namenode but it is not running");
     }
-    ToolRunner.run(new DFSAdmin(conf), new String[] {"-finalizeUpgrade"});
+    ToolRunner.run(new DFSAdmin(conf), new String[]{"-finalizeUpgrade"});
   }
   
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e453989a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index e756f0b..92d31d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -66,6 +66,7 @@ import org.apache.hadoop.hdfs.server.balancer.Balancer.Parameters;
 import org.apache.hadoop.hdfs.server.balancer.Balancer.Result;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.LazyPersistTestCase;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
@@ -120,13 +121,16 @@ public class TestBalancer {
     conf.setLong(DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY, 2000L);
   }
 
-  static void initConfWithRamDisk(Configuration conf) {
+  static void initConfWithRamDisk(Configuration conf,
+                                  long ramDiskCapacity) {
     conf.setLong(DFS_BLOCK_SIZE_KEY, DEFAULT_RAM_DISK_BLOCK_SIZE);
+    conf.setLong(DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, ramDiskCapacity);
     conf.setInt(DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC, 3);
     conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1);
     conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
     conf.setInt(DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC, 1);
     conf.setInt(DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES, DEFAULT_RAM_DISK_BLOCK_SIZE);
+    LazyPersistTestCase.initCacheManipulator();
   }
 
   /* create a file with a length of <code>fileLen</code> */
@@ -1245,7 +1249,6 @@ public class TestBalancer {
     final int SEED = 0xFADED;
     final short REPL_FACT = 1;
     Configuration conf = new Configuration();
-    initConfWithRamDisk(conf);
 
     final int defaultRamDiskCapacity = 10;
     final long ramDiskStorageLimit =
@@ -1255,6 +1258,8 @@ public class TestBalancer {
       ((long) defaultRamDiskCapacity * DEFAULT_RAM_DISK_BLOCK_SIZE) +
       (DEFAULT_RAM_DISK_BLOCK_SIZE - 1);
 
+    initConfWithRamDisk(conf, ramDiskStorageLimit);
+
     cluster = new MiniDFSCluster
       .Builder(conf)
       .numDataNodes(1)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e453989a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index 2ac9416..778dd28 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -492,6 +492,10 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     }
 
     @Override
+    public void releaseLockedMemory(long bytesToRelease) {
+    }
+
+    @Override
     public void releaseReservedSpace(long bytesToRelease) {
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e453989a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index b225e35..9b942b7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.LazyPersistTestCase;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
@@ -79,6 +80,8 @@ public class TestDirectoryScanner {
     CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_LENGTH);
     CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);
     CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
+    CONF.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
+                 Long.MAX_VALUE);
   }
 
   /** create a file with a length of <code>fileLen</code> */
@@ -308,6 +311,7 @@ public class TestDirectoryScanner {
 
   @Test (timeout=300000)
   public void testRetainBlockOnPersistentStorage() throws Exception {
+    LazyPersistTestCase.initCacheManipulator();
     cluster = new MiniDFSCluster
         .Builder(CONF)
         .storageTypes(new StorageType[] { StorageType.RAM_DISK, StorageType.DEFAULT })
@@ -349,6 +353,7 @@ public class TestDirectoryScanner {
 
   @Test (timeout=300000)
   public void testDeleteBlockOnTransientStorage() throws Exception {
+    LazyPersistTestCase.initCacheManipulator();
     cluster = new MiniDFSCluster
         .Builder(CONF)
         .storageTypes(new StorageType[] { StorageType.RAM_DISK, StorageType.DEFAULT })
@@ -615,6 +620,10 @@ public class TestDirectoryScanner {
     }
 
     @Override
+    public void releaseLockedMemory(long bytesToRelease) {
+    }
+
+    @Override
     public BlockIterator newBlockIterator(String bpid, String name) {
       throw new UnsupportedOperationException();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e453989a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
index 7a09630..58932fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
@@ -339,7 +339,7 @@ public class TestFsDatasetCache {
     for (int i=0; i<numFiles-1; i++) {
       setHeartbeatResponse(cacheBlocks(fileLocs[i]));
       total = DFSTestUtil.verifyExpectedCacheUsage(
-          rounder.round(total + fileSizes[i]), 4 * (i + 1), fsd);
+          rounder.roundUp(total + fileSizes[i]), 4 * (i + 1), fsd);
     }
 
     // nth file should hit a capacity exception
@@ -365,7 +365,7 @@ public class TestFsDatasetCache {
     int curCachedBlocks = 16;
     for (int i=0; i<numFiles-1; i++) {
       setHeartbeatResponse(uncacheBlocks(fileLocs[i]));
-      long uncachedBytes = rounder.round(fileSizes[i]);
+      long uncachedBytes = rounder.roundUp(fileSizes[i]);
       total -= uncachedBytes;
       curCachedBlocks -= uncachedBytes / BLOCK_SIZE;
       DFSTestUtil.verifyExpectedCacheUsage(total, curCachedBlocks, fsd);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e453989a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalVolumeImpl.java
index ea9e4c1..3242ff7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalVolumeImpl.java
@@ -82,6 +82,10 @@ public class ExternalVolumeImpl implements FsVolumeSpi {
   }
 
   @Override
+  public void releaseLockedMemory(long bytesToRelease) {
+  }
+
+  @Override
   public BlockIterator newBlockIterator(String bpid, String name) {
     return null;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e453989a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
index 5dc86f7..5ce5cc6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
@@ -23,16 +23,7 @@ import static org.apache.hadoop.fs.CreateFlag.CREATE;
 import static org.apache.hadoop.fs.CreateFlag.LAZY_PERSIST;
 import static org.apache.hadoop.fs.StorageType.DEFAULT;
 import static org.apache.hadoop.fs.StorageType.RAM_DISK;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertThat;
@@ -40,6 +31,7 @@ import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.HashSet;
@@ -68,6 +60,7 @@ import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.tools.JMXGet;
+import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -80,8 +73,8 @@ public abstract class LazyPersistTestCase {
   static final byte LAZY_PERSIST_POLICY_ID = (byte) 15;
 
   static {
-    DFSTestUtil.setNameNodeLogLevel(Level.ALL);
-    GenericTestUtils.setLogLevel(FsDatasetImpl.LOG, Level.ALL);
+    DFSTestUtil.setNameNodeLogLevel(Level.DEBUG);
+    GenericTestUtils.setLogLevel(FsDatasetImpl.LOG, Level.DEBUG);
   }
 
   protected static final int BLOCK_SIZE = 5 * 1024 * 1024;
@@ -95,6 +88,8 @@ public abstract class LazyPersistTestCase {
   protected static final int LAZY_WRITER_INTERVAL_SEC = 1;
   protected static final Log LOG = LogFactory.getLog(LazyPersistTestCase.class);
   protected static final short REPL_FACTOR = 1;
+  protected final long osPageSize =
+      NativeIO.POSIX.getCacheManipulator().getOperatingSystemPageSize();
 
   protected MiniDFSCluster cluster;
   protected DistributedFileSystem fs;
@@ -194,7 +189,7 @@ public abstract class LazyPersistTestCase {
   protected final void makeRandomTestFile(Path path, long length,
       boolean isLazyPersist, long seed) throws IOException {
     DFSTestUtil.createFile(fs, path, isLazyPersist, BUFFER_LENGTH, length,
-      BLOCK_SIZE, REPL_FACTOR, seed, true);
+                           BLOCK_SIZE, REPL_FACTOR, seed, true);
   }
 
   protected final void makeTestFile(Path path, long length,
@@ -242,10 +237,12 @@ public abstract class LazyPersistTestCase {
       int ramDiskReplicaCapacity,
       long ramDiskStorageLimit,
       long evictionLowWatermarkReplicas,
+      long maxLockedMemory,
       boolean useSCR,
       boolean useLegacyBlockReaderLocal,
       boolean disableScrubber) throws IOException {
 
+    initCacheManipulator();
     Configuration conf = new Configuration();
     conf.setLong(DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
     if (disableScrubber) {
@@ -262,6 +259,7 @@ public abstract class LazyPersistTestCase {
     conf.setLong(DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES,
                 evictionLowWatermarkReplicas * BLOCK_SIZE);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY, 1);
+    conf.setLong(DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, maxLockedMemory);
 
     if (useSCR) {
       conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
@@ -311,6 +309,31 @@ public abstract class LazyPersistTestCase {
     LOG.info("Cluster startup complete");
   }
 
+  /**
+   * Use a dummy cache manipulator for testing.
+   */
+  public static void initCacheManipulator() {
+    NativeIO.POSIX.setCacheManipulator(new NativeIO.POSIX.CacheManipulator() {
+      @Override
+      public void mlock(String identifier,
+                        ByteBuffer mmap, long length) throws IOException {
+        LOG.info("LazyPersistTestCase: faking mlock of " + identifier + " bytes.");
+      }
+
+      @Override
+      public long getMemlockLimit() {
+        LOG.info("LazyPersistTestCase: fake return " + Long.MAX_VALUE);
+        return Long.MAX_VALUE;
+      }
+
+      @Override
+      public boolean verifyCanMlock() {
+        LOG.info("LazyPersistTestCase: fake return " + true);
+        return true;
+      }
+    });
+  }
+
   ClusterWithRamDiskBuilder getClusterBuilder() {
     return new ClusterWithRamDiskBuilder();
   }
@@ -344,6 +367,11 @@ public abstract class LazyPersistTestCase {
       return this;
     }
 
+    public ClusterWithRamDiskBuilder setMaxLockedMemory(long maxLockedMemory) {
+      this.maxLockedMemory = maxLockedMemory;
+      return this;
+    }
+
     public ClusterWithRamDiskBuilder setUseScr(boolean useScr) {
       this.useScr = useScr;
       return this;
@@ -376,13 +404,14 @@ public abstract class LazyPersistTestCase {
       LazyPersistTestCase.this.startUpCluster(
           numDatanodes, hasTransientStorage, storageTypes, ramDiskReplicaCapacity,
           ramDiskStorageLimit, evictionLowWatermarkReplicas,
-          useScr, useLegacyBlockReaderLocal,disableScrubber);
+          maxLockedMemory, useScr, useLegacyBlockReaderLocal, disableScrubber);
     }
 
     private int numDatanodes = REPL_FACTOR;
     private StorageType[] storageTypes = null;
     private int ramDiskReplicaCapacity = -1;
     private long ramDiskStorageLimit = -1;
+    private long maxLockedMemory = Long.MAX_VALUE;
     private boolean hasTransientStorage = true;
     private boolean useScr = false;
     private boolean useLegacyBlockReaderLocal = false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e453989a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistLockedMemory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistLockedMemory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistLockedMemory.java
new file mode 100644
index 0000000..9ea4665
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistLockedMemory.java
@@ -0,0 +1,201 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
+
+
+import com.google.common.base.Supplier;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSOutputStream;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.MetricsAsserts;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.concurrent.TimeoutException;
+
+import static org.apache.hadoop.fs.CreateFlag.CREATE;
+import static org.apache.hadoop.fs.CreateFlag.LAZY_PERSIST;
+import static org.apache.hadoop.fs.StorageType.DEFAULT;
+import static org.apache.hadoop.fs.StorageType.RAM_DISK;
+import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertThat;
+
+/**
+ * Verify that locked memory is used correctly when writing to replicas in
+ * memory
+ */
+public class TestLazyPersistLockedMemory extends LazyPersistTestCase {
+
+  /**
+   * RAM disk present but locked memory is set to zero. Placement should
+   * fall back to disk.
+   */
+  @Test
+  public void testWithNoLockedMemory() throws IOException {
+    getClusterBuilder().setNumDatanodes(1)
+                       .setMaxLockedMemory(0).build();
+
+    final String METHOD_NAME = GenericTestUtils.getMethodName();
+    Path path = new Path("/" + METHOD_NAME + ".dat");
+    makeTestFile(path, BLOCK_SIZE, true);
+    ensureFileReplicasOnStorageType(path, DEFAULT);
+  }
+
+  @Test
+  public void testReservation()
+      throws IOException, TimeoutException, InterruptedException {
+    getClusterBuilder().setNumDatanodes(1)
+                       .setMaxLockedMemory(BLOCK_SIZE).build();
+    final String METHOD_NAME = GenericTestUtils.getMethodName();
+    final FsDatasetSpi<?> fsd = cluster.getDataNodes().get(0).getFSDataset();
+
+    // Create a file and ensure the replica in RAM_DISK uses locked memory.
+    Path path = new Path("/" + METHOD_NAME + ".dat");
+    makeTestFile(path, BLOCK_SIZE, true);
+    ensureFileReplicasOnStorageType(path, RAM_DISK);
+    assertThat(fsd.getCacheUsed(), is((long) BLOCK_SIZE));
+  }
+
+  @Test
+  public void testReleaseOnFileDeletion()
+      throws IOException, TimeoutException, InterruptedException {
+    getClusterBuilder().setNumDatanodes(1)
+                       .setMaxLockedMemory(BLOCK_SIZE).build();
+    final String METHOD_NAME = GenericTestUtils.getMethodName();
+    final FsDatasetSpi<?> fsd = cluster.getDataNodes().get(0).getFSDataset();
+
+    Path path = new Path("/" + METHOD_NAME + ".dat");
+    makeTestFile(path, BLOCK_SIZE, true);
+    ensureFileReplicasOnStorageType(path, RAM_DISK);
+    assertThat(fsd.getCacheUsed(), is((long) BLOCK_SIZE));
+
+    // Delete the file and ensure that the locked memory is released.
+    fs.delete(path, false);
+    DataNodeTestUtils.triggerBlockReport(cluster.getDataNodes().get(0));
+    waitForLockedBytesUsed(fsd, 0);
+  }
+
+  /**
+   * Verify that locked RAM is released when blocks are evicted from RAM disk.
+   */
+  @Test
+  public void testReleaseOnEviction()
+      throws IOException, TimeoutException, InterruptedException {
+    getClusterBuilder().setNumDatanodes(1)
+                       .setMaxLockedMemory(BLOCK_SIZE)
+                       .setRamDiskReplicaCapacity(BLOCK_SIZE * 2 - 1)
+                       .build();
+    final String METHOD_NAME = GenericTestUtils.getMethodName();
+    final FsDatasetSpi<?> fsd = cluster.getDataNodes().get(0).getFSDataset();
+
+    Path path = new Path("/" + METHOD_NAME + ".dat");
+    makeTestFile(path, BLOCK_SIZE, true);
+
+    // The block should get evicted soon since it pushes RAM disk free
+    // space below the threshold.
+    waitForLockedBytesUsed(fsd, 0);
+
+    MetricsRecordBuilder rb =
+        MetricsAsserts.getMetrics(cluster.getDataNodes().get(0).getMetrics().name());
+    MetricsAsserts.assertCounter("RamDiskBlocksEvicted", 1L, rb);
+  }
+
+  /**
+   * Verify that locked bytes are correctly updated when a block is finalized
+   * at less than its max length.
+   */
+  @Test
+  public void testShortBlockFinalized()
+      throws IOException, TimeoutException, InterruptedException {
+    getClusterBuilder().setNumDatanodes(1).build();
+    final String METHOD_NAME = GenericTestUtils.getMethodName();
+    final FsDatasetSpi<?> fsd = cluster.getDataNodes().get(0).getFSDataset();
+
+    Path path = new Path("/" + METHOD_NAME + ".dat");
+    makeTestFile(path, 1, true);
+    assertThat(fsd.getCacheUsed(), is(osPageSize));
+
+    // Delete the file and ensure locked RAM usage goes to zero.
+    fs.delete(path, false);
+    waitForLockedBytesUsed(fsd, 0);
+  }
+
+  /**
+   * Verify that locked bytes are correctly updated when the client goes
+   * away unexpectedly during a write.
+   */
+  @Test
+  public void testWritePipelineFailure()
+    throws IOException, TimeoutException, InterruptedException {
+    getClusterBuilder().setNumDatanodes(1).build();
+    final String METHOD_NAME = GenericTestUtils.getMethodName();
+    final FsDatasetSpi<?> fsd = cluster.getDataNodes().get(0).getFSDataset();
+
+    Path path = new Path("/" + METHOD_NAME + ".dat");
+
+    EnumSet<CreateFlag> createFlags = EnumSet.of(CREATE, LAZY_PERSIST);
+    // Write 1 byte to the file and kill the writer.
+    final FSDataOutputStream fos =
+        fs.create(path,
+                  FsPermission.getFileDefault(),
+                  createFlags,
+                  BUFFER_LENGTH,
+                  REPL_FACTOR,
+                  BLOCK_SIZE,
+                  null);
+
+    fos.write(new byte[1]);
+    fos.hsync();
+    DFSTestUtil.abortStream((DFSOutputStream) fos.getWrappedStream());
+    waitForLockedBytesUsed(fsd, osPageSize);
+
+    // Delete the file and ensure locked RAM goes to zero.
+    fs.delete(path, false);
+    DataNodeTestUtils.triggerBlockReport(cluster.getDataNodes().get(0));
+    waitForLockedBytesUsed(fsd, 0);
+  }
+
+  /**
+   * Wait until used locked byte count goes to the expected value.
+   * @throws TimeoutException after 300 seconds.
+   */
+  private void waitForLockedBytesUsed(final FsDatasetSpi<?> fsd,
+                                      final long expectedLockedBytes)
+      throws TimeoutException, InterruptedException {
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        long cacheUsed = fsd.getCacheUsed();
+        LOG.info("cacheUsed=" + cacheUsed + ", waiting for it to be " + expectedLockedBytes);
+        if (cacheUsed < 0) {
+          throw new IllegalStateException("cacheUsed unexpectedly negative");
+        }
+        return (cacheUsed == expectedLockedBytes);
+      }
+    }, 1000, 300000);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e453989a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
index d5664cf..a77184b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
@@ -204,7 +204,7 @@ public class TestWriteToReplica {
     long available = v.getCapacity()-v.getDfsUsed();
     long expectedLen = blocks[FINALIZED].getNumBytes();
     try {
-      v.decDfsUsed(bpid, -available);
+      v.onBlockFileDeletion(bpid, -available);
       blocks[FINALIZED].setNumBytes(expectedLen+100);
       dataSet.append(blocks[FINALIZED], newGS, expectedLen);
       Assert.fail("Should not have space to append to an RWR replica" + blocks[RWR]);
@@ -212,7 +212,7 @@ public class TestWriteToReplica {
       Assert.assertTrue(e.getMessage().startsWith(
           "Insufficient space for appending to "));
     }
-    v.decDfsUsed(bpid, available);
+    v.onBlockFileDeletion(bpid, available);
     blocks[FINALIZED].setNumBytes(expectedLen);
 
     newGS = blocks[RBW].getGenerationStamp()+1;


[05/50] [abbrv] hadoop git commit: HDFS-6888. Allow selectively audit logging ops (Contributed by Chen He)

Posted by ji...@apache.org.
HDFS-6888. Allow selectively audit logging ops (Contributed by Chen He)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3bef7c80
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3bef7c80
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3bef7c80

Branch: refs/heads/HDFS-7240
Commit: 3bef7c80a97709b367781180b2e11fc50653d3c8
Parents: cb8e69a
Author: Vinayakumar B <vi...@apache.org>
Authored: Fri May 15 11:05:01 2015 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Fri May 15 11:07:51 2015 +0530

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   2 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   1 +
 .../hdfs/server/namenode/FSNamesystem.java      |  11 +-
 .../src/main/resources/hdfs-default.xml         |   9 ++
 .../server/namenode/TestAuditLogAtDebug.java    | 131 +++++++++++++++++++
 5 files changed, 152 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3bef7c80/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 445b7c2..6c0923c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -552,6 +552,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-8350. Remove old webhdfs.xml and other outdated documentation stuff.
     (Brahma Reddy Battula via aajisaka)
 
+    HDFS-6888. Allow selectively audit logging ops (Chen He via vinayakumarb)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3bef7c80/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index ae056fa..1d0cf4b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -341,6 +341,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final boolean DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_DEFAULT = false;
   public static final String  DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY = "dfs.namenode.audit.log.async";
   public static final boolean DFS_NAMENODE_AUDIT_LOG_ASYNC_DEFAULT = false;
+  public static final String  DFS_NAMENODE_AUDIT_LOG_DEBUG_CMDLIST = "dfs.namenode.audit.log.debug.cmdlist";
 
   public static final String  DFS_BALANCER_MOVEDWINWIDTH_KEY = "dfs.balancer.movedWinWidth";
   public static final long    DFS_BALANCER_MOVEDWINWIDTH_DEFAULT = 5400*1000L;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3bef7c80/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 33aaa72..4d82fab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -8149,15 +8149,20 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    * defined in the config file. It can also be explicitly listed in the
    * config file.
    */
-  private static class DefaultAuditLogger extends HdfsAuditLogger {
+  @VisibleForTesting
+  static class DefaultAuditLogger extends HdfsAuditLogger {
 
     private boolean logTokenTrackingId;
+    private Set<String> debugCmdSet = new HashSet<String>();
 
     @Override
     public void initialize(Configuration conf) {
       logTokenTrackingId = conf.getBoolean(
           DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_KEY,
           DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_DEFAULT);
+
+      debugCmdSet.addAll(Arrays.asList(conf.getTrimmedStrings(
+          DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_DEBUG_CMDLIST)));
     }
 
     @Override
@@ -8165,7 +8170,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         InetAddress addr, String cmd, String src, String dst,
         FileStatus status, UserGroupInformation ugi,
         DelegationTokenSecretManager dtSecretManager) {
-      if (auditLog.isInfoEnabled()) {
+
+      if (auditLog.isDebugEnabled() ||
+          (auditLog.isInfoEnabled() && !debugCmdSet.contains(cmd))) {
         final StringBuilder sb = auditBuffer.get();
         sb.setLength(0);
         sb.append("allowed=").append(succeeded).append("\t");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3bef7c80/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 5d1d670..7f0730b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -2085,6 +2085,15 @@
 </property>
 
 <property>
+  <name>dfs.namenode.audit.log.debug.cmdlist</name>
+  <value></value>
+  <description>
+    A comma separated list of NameNode commands that are written to the HDFS
+    namenode audit log only if the audit log level is debug.
+  </description>
+</property>
+
+<property>
   <name>dfs.client.use.legacy.blockreader.local</name>
   <value>false</value>
   <description>
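
As a usage illustration (not part of the patch), a minimal Java sketch of setting the new
key from a client-side Configuration, mirroring how TestAuditLogAtDebug below configures it.
The command names "getfileinfo" and "listStatus" are assumed examples of NameNode audit
"cmd=" strings, not values this commit prescribes:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class AuditDebugCmdListExample {
  public static void main(String[] args) {
    // Sketch: keep two chatty read commands out of the INFO-level audit log.
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_DEBUG_CMDLIST,
        "getfileinfo,listStatus");
    // In a real deployment the same value would go into hdfs-site.xml. With the
    // audit logger (FSNamesystem.audit) at INFO the listed commands are skipped;
    // at DEBUG they are logged again, which is what TestAuditLogAtDebug verifies.
    System.out.println(conf.get(DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_DEBUG_CMDLIST));
  }
}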

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3bef7c80/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogAtDebug.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogAtDebug.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogAtDebug.java
new file mode 100644
index 0000000..ce11514
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogAtDebug.java
@@ -0,0 +1,131 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.namenode;
+
+import com.google.common.base.Joiner;
+import com.google.common.base.Optional;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.DefaultAuditLogger;
+import org.apache.log4j.Level;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+import java.net.Inet4Address;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.*;
+
+/**
+ * Test that the HDFS Audit logger respects DFS_NAMENODE_AUDIT_LOG_DEBUG_CMDLIST. 
+ */
+public class TestAuditLogAtDebug {
+  static final Log LOG = LogFactory.getLog(TestAuditLogAtDebug.class);
+
+  @Rule
+  public Timeout timeout = new Timeout(300000);
+  
+  private static final String DUMMY_COMMAND_1 = "dummycommand1";
+  private static final String DUMMY_COMMAND_2 = "dummycommand2";
+  
+  private DefaultAuditLogger makeSpyLogger(
+      Level level, Optional<List<String>> debugCommands) {
+    DefaultAuditLogger logger = new DefaultAuditLogger();
+    Configuration conf = new HdfsConfiguration();
+    if (debugCommands.isPresent()) {
+      conf.set(DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_DEBUG_CMDLIST,
+               Joiner.on(",").join(debugCommands.get()));
+    }
+    logger.initialize(conf);
+    ((Log4JLogger) FSNamesystem.auditLog).getLogger().setLevel(level);
+    return spy(logger);
+  }
+  
+  private void logDummyCommandToAuditLog(HdfsAuditLogger logger, String command) {
+    logger.logAuditEvent(true, "",
+                         Inet4Address.getLoopbackAddress(),
+                         command, "", "",
+                         null, null, null);
+  }
+
+  @Test
+  public void testDebugCommandNotLoggedAtInfo() {
+    DefaultAuditLogger logger =
+        makeSpyLogger(
+            Level.INFO, Optional.of(Arrays.asList(DUMMY_COMMAND_1)));
+    logDummyCommandToAuditLog(logger, DUMMY_COMMAND_1);
+    verify(logger, never()).logAuditMessage(anyString());
+  }
+
+  @Test
+  public void testDebugCommandLoggedAtDebug() {
+    DefaultAuditLogger logger =
+        makeSpyLogger(
+            Level.DEBUG, Optional.of(Arrays.asList(DUMMY_COMMAND_1)));
+    logDummyCommandToAuditLog(logger, DUMMY_COMMAND_1);
+    verify(logger, times(1)).logAuditMessage(anyString());
+  }
+  
+  @Test
+  public void testInfoCommandLoggedAtInfo() {
+    DefaultAuditLogger logger =
+        makeSpyLogger(
+            Level.INFO, Optional.of(Arrays.asList(DUMMY_COMMAND_1)));
+    logDummyCommandToAuditLog(logger, DUMMY_COMMAND_2);
+    verify(logger, times(1)).logAuditMessage(anyString());
+  }
+
+  @Test
+  public void testMultipleDebugCommandsNotLoggedAtInfo() {
+    DefaultAuditLogger logger =
+        makeSpyLogger(
+            Level.INFO,
+            Optional.of(Arrays.asList(DUMMY_COMMAND_1, DUMMY_COMMAND_2)));
+    logDummyCommandToAuditLog(logger, DUMMY_COMMAND_1);
+    logDummyCommandToAuditLog(logger, DUMMY_COMMAND_2);
+    verify(logger, never()).logAuditMessage(anyString());
+  }
+
+  @Test
+  public void testMultipleDebugCommandsLoggedAtDebug() {
+    DefaultAuditLogger logger =
+        makeSpyLogger(
+            Level.DEBUG,
+            Optional.of(Arrays.asList(DUMMY_COMMAND_1, DUMMY_COMMAND_2)));
+    logDummyCommandToAuditLog(logger, DUMMY_COMMAND_1);
+    logDummyCommandToAuditLog(logger, DUMMY_COMMAND_2);
+    verify(logger, times(2)).logAuditMessage(anyString());
+  }
+  
+  @Test
+  public void testEmptyDebugCommands() {
+    DefaultAuditLogger logger = makeSpyLogger(
+        Level.INFO, Optional.<List<String>>absent());
+    logDummyCommandToAuditLog(logger, DUMMY_COMMAND_1);
+    logDummyCommandToAuditLog(logger, DUMMY_COMMAND_2);
+    verify(logger, times(2)).logAuditMessage(anyString());
+  }
+}


[24/50] [abbrv] hadoop git commit: HDFS-4185. Add a metric for number of active leases (Rakesh R via raviprak)

Posted by ji...@apache.org.
HDFS-4185. Add a metric for number of active leases (Rakesh R via raviprak)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cdfae446
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cdfae446
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cdfae446

Branch: refs/heads/HDFS-7240
Commit: cdfae446ad285db979a79bf55665363fd943702c
Parents: 0c590e1
Author: Ravi Prakash <ra...@altiscale.com>
Authored: Mon May 18 12:37:21 2015 -0700
Committer: Ravi Prakash <ra...@altiscale.com>
Committed: Mon May 18 12:37:21 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  2 +
 .../hdfs/server/namenode/FSNamesystem.java      | 17 ++++++
 .../hdfs/server/namenode/LeaseManager.java      |  9 +++
 .../namenode/metrics/TestNameNodeMetrics.java   | 59 ++++++++++++++++++--
 4 files changed, 83 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cdfae446/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7fd3495..35c3b5a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -570,6 +570,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-8345. Storage policy APIs must be exposed via the FileSystem
     interface. (Arpit Agarwal)
 
+    HDFS-4185. Add a metric for number of active leases (Rakesh R via raviprak)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cdfae446/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 0fec5ee..7e5b981 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -5347,6 +5347,23 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   }
 
   /**
+   * Get the number of files under construction in the system.
+   */
+  @Metric({ "NumFilesUnderConstruction",
+      "Number of files under construction" })
+  public long getNumFilesUnderConstruction() {
+    return leaseManager.countPath();
+  }
+
+  /**
+   * Get the total number of active clients holding lease in the system.
+   */
+  @Metric({ "NumActiveClients", "Number of active clients holding lease" })
+  public long getNumActiveClients() {
+    return leaseManager.countLease();
+  }
+
+  /**
    * Get the total number of COMPLETE blocks in the system.
    * For safe mode only complete blocks are counted.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cdfae446/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
index ade2312..0806f82 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
@@ -130,6 +130,15 @@ public class LeaseManager {
   @VisibleForTesting
   public synchronized int countLease() {return sortedLeases.size();}
 
+  /** @return the number of paths contained in all leases */
+  synchronized int countPath() {
+    int count = 0;
+    for (Lease lease : sortedLeases) {
+      count += lease.getFiles().size();
+    }
+    return count;
+  }
+
   /**
    * Adds (or re-adds) the lease for the specified file.
    */
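
For an operational check of the two new gauges outside of the test changes that follow, a
rough sketch that scrapes the NameNode's /jmx servlet. The host name, the default 2.x HTTP
port 50070 and the bean name Hadoop:service=NameNode,name=FSNamesystem are assumptions
about a typical deployment, not something this patch defines:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class PrintLeaseGauges {
  public static void main(String[] args) throws Exception {
    // Assumed endpoint: the NameNode's /jmx servlet, filtered to the
    // FSNamesystem metrics source where the @Metric gauges are registered.
    URL url = new URL(
        "http://namenode:50070/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem");
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(url.openStream(), StandardCharsets.UTF_8))) {
      String line;
      while ((line = in.readLine()) != null) {
        // Keep only the two gauges added by this patch.
        if (line.contains("NumActiveClients")
            || line.contains("NumFilesUnderConstruction")) {
          System.out.println(line.trim());
        }
      }
    }
  }
}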

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cdfae446/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index b390391..3120f85 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.metrics;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOGGERS_KEY;
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
 import static org.apache.hadoop.test.MetricsAsserts.assertQuantileGauges;
@@ -29,12 +28,12 @@ import java.io.DataInputStream;
 import java.io.IOException;
 import java.util.Random;
 import com.google.common.collect.ImmutableList;
-import com.google.common.io.Files;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.Path;
@@ -55,12 +54,10 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
-import org.apache.hadoop.hdfs.server.namenode.top.TopAuditLogger;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.MetricsSource;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.test.MetricsAsserts;
-import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Before;
@@ -567,4 +564,58 @@ public class TestNameNodeMetrics {
     assertTrue(MetricsAsserts.getLongCounter("TransactionsNumOps", rbNew) >
         startWriteCounter);
   }
+
+  /**
+   * Test metrics indicating the number of active clients and the files under
+   * construction
+   */
+  @Test(timeout = 60000)
+  public void testNumActiveClientsAndFilesUnderConstructionMetrics()
+      throws Exception {
+    final Path file1 = getTestPath("testFileAdd1");
+    createFile(file1, 100, (short) 3);
+    assertGauge("NumActiveClients", 0L, getMetrics(NS_METRICS));
+    assertGauge("NumFilesUnderConstruction", 0L, getMetrics(NS_METRICS));
+
+    Path file2 = new Path("/testFileAdd2");
+    FSDataOutputStream output2 = fs.create(file2);
+    output2.writeBytes("Some test data");
+    assertGauge("NumActiveClients", 1L, getMetrics(NS_METRICS));
+    assertGauge("NumFilesUnderConstruction", 1L, getMetrics(NS_METRICS));
+
+    Path file3 = new Path("/testFileAdd3");
+    FSDataOutputStream output3 = fs.create(file3);
+    output3.writeBytes("Some test data");
+    assertGauge("NumActiveClients", 1L, getMetrics(NS_METRICS));
+    assertGauge("NumFilesUnderConstruction", 2L, getMetrics(NS_METRICS));
+
+    // create another DistributedFileSystem client
+    DistributedFileSystem fs1 = (DistributedFileSystem) cluster
+        .getNewFileSystemInstance(0);
+    try {
+      Path file4 = new Path("/testFileAdd4");
+      FSDataOutputStream output4 = fs1.create(file4);
+      output4.writeBytes("Some test data");
+      assertGauge("NumActiveClients", 2L, getMetrics(NS_METRICS));
+      assertGauge("NumFilesUnderConstruction", 3L, getMetrics(NS_METRICS));
+
+      Path file5 = new Path("/testFileAdd35");
+      FSDataOutputStream output5 = fs1.create(file5);
+      output5.writeBytes("Some test data");
+      assertGauge("NumActiveClients", 2L, getMetrics(NS_METRICS));
+      assertGauge("NumFilesUnderConstruction", 4L, getMetrics(NS_METRICS));
+
+      output2.close();
+      output3.close();
+      assertGauge("NumActiveClients", 1L, getMetrics(NS_METRICS));
+      assertGauge("NumFilesUnderConstruction", 2L, getMetrics(NS_METRICS));
+
+      output4.close();
+      output5.close();
+      assertGauge("NumActiveClients", 0L, getMetrics(NS_METRICS));
+      assertGauge("NumFilesUnderConstruction", 0L, getMetrics(NS_METRICS));
+    } finally {
+      fs1.close();
+    }
+  }
 }


[14/50] [abbrv] hadoop git commit: HADOOP-11988. Fix typo in the document for hadoop fs -find. Contributed by Kengo Seki.

Posted by ji...@apache.org.
HADOOP-11988. Fix typo in the document for hadoop fs -find. Contributed by Kengo Seki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cab0dadb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cab0dadb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cab0dadb

Branch: refs/heads/HDFS-7240
Commit: cab0dadbd878c84e3d94154b4fd1ae3b10f49f93
Parents: e453989
Author: Akira Ajisaka <aa...@apache.org>
Authored: Sun May 17 13:35:23 2015 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Sun May 17 13:35:51 2015 +0900

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                  | 3 +++
 .../hadoop-common/src/site/markdown/FileSystemShell.md           | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cab0dadb/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index aecfde4..5666035 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -689,6 +689,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-8174. Remove confusing comment in Path#isAbsolute()
     (Suresh Srinivas via vinayakumarb)
 
+    HADOOP-11988. Fix typo in the document for hadoop fs -find.
+    (Kengo Seki via aajisaka)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cab0dadb/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
index 305370d..2920e01 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
@@ -286,9 +286,9 @@ The following primary expressions are recognised:
 
     Evaluates as true if the basename of the file matches the pattern using standard file system globbing. If -iname is used then the match is case insensitive.
 
-*   -print<br />-print0Always
+*   -print<br />-print0
 
-    evaluates to true. Causes the current pathname to be written to standard output. If the -print0 expression is used then an ASCII NULL character is appended.
+    Always evaluates to true. Causes the current pathname to be written to standard output. If the -print0 expression is used then an ASCII NULL character is appended.
 
 The following operators are recognised:
 

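As a usage note for the corrected passage, an illustrative invocation such as
hadoop fs -find / -name test -print walks the namespace from / and writes each path whose
basename matches "test" to standard output, while -print0 does the same but appends an
ASCII NUL after each path; the option names come from the page being edited, and the
example command itself is not part of this commit.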

[43/50] [abbrv] hadoop git commit: YARN-3583. Support of NodeLabel object instead of plain String in YarnClient side. (Sunil G via wangda)

Posted by ji...@apache.org.
YARN-3583. Support of NodeLabel object instead of plain String in YarnClient side. (Sunil G via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/563eb1ad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/563eb1ad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/563eb1ad

Branch: refs/heads/HDFS-7240
Commit: 563eb1ad2ae848a23bbbf32ebfaf107e8fa14e87
Parents: b37da52
Author: Wangda Tan <wa...@apache.org>
Authored: Tue May 19 16:54:38 2015 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Tue May 19 16:54:38 2015 -0700

----------------------------------------------------------------------
 .../hadoop/mapred/ResourceMgrDelegate.java      |   6 +-
 hadoop-yarn-project/CHANGES.txt                 |   3 +
 .../GetLabelsToNodesResponse.java               |   7 +-
 .../GetNodesToLabelsResponse.java               |   7 +-
 ..._server_resourcemanager_service_protos.proto |   7 +-
 .../src/main/proto/yarn_protos.proto            |   6 +-
 .../src/main/proto/yarn_service_protos.proto    |   2 +-
 .../hadoop/yarn/client/api/YarnClient.java      |   8 +-
 .../yarn/client/api/impl/YarnClientImpl.java    |   6 +-
 .../yarn/client/api/impl/TestYarnClient.java    |  74 +++++++++++--
 .../impl/pb/GetLabelsToNodesResponsePBImpl.java |  28 +++--
 .../impl/pb/GetNodesToLabelsResponsePBImpl.java |  58 ++++++----
 .../pb/ReplaceLabelsOnNodeRequestPBImpl.java    |  18 +--
 .../server/resourcemanager/ClientRMService.java |   6 +-
 .../resourcemanager/TestClientRMService.java    | 110 +++++++++++--------
 15 files changed, 226 insertions(+), 120 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/563eb1ad/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
index 2b7cd5f..90f6876 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
@@ -444,19 +444,19 @@ public class ResourceMgrDelegate extends YarnClient {
   }
 
   @Override
-  public Map<NodeId, Set<String>> getNodeToLabels() throws YarnException,
+  public Map<NodeId, Set<NodeLabel>> getNodeToLabels() throws YarnException,
       IOException {
     return client.getNodeToLabels();
   }
 
   @Override
-  public Map<String, Set<NodeId>> getLabelsToNodes() throws YarnException,
+  public Map<NodeLabel, Set<NodeId>> getLabelsToNodes() throws YarnException,
       IOException {
     return client.getLabelsToNodes();
   }
 
   @Override
-  public Map<String, Set<NodeId>> getLabelsToNodes(Set<String> labels)
+  public Map<NodeLabel, Set<NodeId>> getLabelsToNodes(Set<String> labels)
       throws YarnException, IOException {
     return client.getLabelsToNodes(labels);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/563eb1ad/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index ab6f488..9ba9fd8 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -250,6 +250,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3565. NodeHeartbeatRequest/RegisterNodeManagerRequest should use 
     NodeLabel object instead of String. (Naganarasimha G R via wangda)
 
+    YARN-3583. Support of NodeLabel object instead of plain String 
+    in YarnClient side. (Sunil G via wangda)
+
   OPTIMIZATIONS
 
     YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/563eb1ad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetLabelsToNodesResponse.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetLabelsToNodesResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetLabelsToNodesResponse.java
index f105359..da2be28 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetLabelsToNodesResponse.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetLabelsToNodesResponse.java
@@ -24,11 +24,12 @@ import java.util.Set;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.util.Records;
 
 public abstract class GetLabelsToNodesResponse {
   public static GetLabelsToNodesResponse newInstance(
-      Map<String, Set<NodeId>> map) {
+      Map<NodeLabel, Set<NodeId>> map) {
 	GetLabelsToNodesResponse response =
         Records.newRecord(GetLabelsToNodesResponse.class);
     response.setLabelsToNodes(map);
@@ -37,9 +38,9 @@ public abstract class GetLabelsToNodesResponse {
 
   @Public
   @Evolving
-  public abstract void setLabelsToNodes(Map<String, Set<NodeId>> map);
+  public abstract void setLabelsToNodes(Map<NodeLabel, Set<NodeId>> map);
 
   @Public
   @Evolving
-  public abstract Map<String, Set<NodeId>> getLabelsToNodes();
+  public abstract Map<NodeLabel, Set<NodeId>> getLabelsToNodes();
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/563eb1ad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNodesToLabelsResponse.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNodesToLabelsResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNodesToLabelsResponse.java
index bcd5421..432485c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNodesToLabelsResponse.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNodesToLabelsResponse.java
@@ -24,11 +24,12 @@ import java.util.Set;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.util.Records;
 
 public abstract class GetNodesToLabelsResponse {
   public static GetNodesToLabelsResponse newInstance(
-      Map<NodeId, Set<String>> map) {
+      Map<NodeId, Set<NodeLabel>> map) {
     GetNodesToLabelsResponse response =
         Records.newRecord(GetNodesToLabelsResponse.class);
     response.setNodeToLabels(map);
@@ -37,9 +38,9 @@ public abstract class GetNodesToLabelsResponse {
 
   @Public
   @Evolving
-  public abstract void setNodeToLabels(Map<NodeId, Set<String>> map);
+  public abstract void setNodeToLabels(Map<NodeId, Set<NodeLabel>> map);
 
   @Public
   @Evolving
-  public abstract Map<NodeId, Set<String>> getNodeToLabels();
+  public abstract Map<NodeId, Set<NodeLabel>> getNodeToLabels();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/563eb1ad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto
index d6d8713..e20b4ae 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto
@@ -91,7 +91,7 @@ message RemoveFromClusterNodeLabelsResponseProto {
 }
 
 message ReplaceLabelsOnNodeRequestProto {
-  repeated NodeIdToLabelsProto nodeToLabels = 1;
+  repeated NodeIdToLabelsNameProto nodeToLabels = 1;
 }
 
 message ReplaceLabelsOnNodeResponseProto {
@@ -107,6 +107,11 @@ message CheckForDecommissioningNodesResponseProto {
   repeated NodeIdProto decommissioningNodes = 1;
 }
 
+message NodeIdToLabelsNameProto {
+  optional NodeIdProto nodeId = 1;
+  repeated string nodeLabels = 2;
+}
+
 enum DecommissionTypeProto {
   NORMAL = 1;
   GRACEFUL = 2;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/563eb1ad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 3c4aa52..b9969b0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -248,13 +248,13 @@ message NodeReportProto {
   repeated string node_labels = 10;
 }
 
-message NodeIdToLabelsProto {
+message NodeIdToLabelsInfoProto {
   optional NodeIdProto nodeId = 1;
-  repeated string nodeLabels = 2;
+  repeated NodeLabelProto nodeLabels = 2;
 }
 
 message LabelsToNodeIdsProto {
-  optional string nodeLabels = 1;
+  optional NodeLabelProto nodeLabels = 1;
   repeated NodeIdProto nodeId = 2;
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/563eb1ad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
index 410b663..098785a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
@@ -198,7 +198,7 @@ message GetNodesToLabelsRequestProto {
 }
 
 message GetNodesToLabelsResponseProto {
-  repeated NodeIdToLabelsProto nodeToLabels = 1;
+  repeated NodeIdToLabelsInfoProto nodeToLabels = 1;
 }
 
 message GetLabelsToNodesRequestProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/563eb1ad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
index 5ce626c..ff03c7d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
@@ -619,7 +619,7 @@ public abstract class YarnClient extends AbstractService {
    */
   @Public
   @Unstable
-  public abstract Map<NodeId, Set<String>> getNodeToLabels()
+  public abstract Map<NodeId, Set<NodeLabel>> getNodeToLabels()
       throws YarnException, IOException;
 
   /**
@@ -634,7 +634,7 @@ public abstract class YarnClient extends AbstractService {
    */
   @Public
   @Unstable
-  public abstract Map<String, Set<NodeId>> getLabelsToNodes()
+  public abstract Map<NodeLabel, Set<NodeId>> getLabelsToNodes()
       throws YarnException, IOException;
 
   /**
@@ -650,8 +650,8 @@ public abstract class YarnClient extends AbstractService {
    */
   @Public
   @Unstable
-  public abstract Map<String, Set<NodeId>> getLabelsToNodes(Set<String> labels)
-      throws YarnException, IOException;
+  public abstract Map<NodeLabel, Set<NodeId>> getLabelsToNodes(
+      Set<String> labels) throws YarnException, IOException;
 
   /**
    * <p>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/563eb1ad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
index 42dd5cd..be4c8c4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
@@ -795,21 +795,21 @@ public class YarnClientImpl extends YarnClient {
   }
   
   @Override
-  public Map<NodeId, Set<String>> getNodeToLabels() throws YarnException,
+  public Map<NodeId, Set<NodeLabel>> getNodeToLabels() throws YarnException,
       IOException {
     return rmClient.getNodeToLabels(GetNodesToLabelsRequest.newInstance())
         .getNodeToLabels();
   }
 
   @Override
-  public Map<String, Set<NodeId>> getLabelsToNodes() throws YarnException,
+  public Map<NodeLabel, Set<NodeId>> getLabelsToNodes() throws YarnException,
       IOException {
     return rmClient.getLabelsToNodes(GetLabelsToNodesRequest.newInstance())
         .getLabelsToNodes();
   }
 
   @Override
-  public Map<String, Set<NodeId>> getLabelsToNodes(Set<String> labels)
+  public Map<NodeLabel, Set<NodeId>> getLabelsToNodes(Set<String> labels)
       throws YarnException, IOException {
     return rmClient.getLabelsToNodes(
         GetLabelsToNodesRequest.newInstance(labels)).getLabelsToNodes();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/563eb1ad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index 10b9bbb..511fa4a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -67,6 +67,8 @@ import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest;
@@ -87,6 +89,7 @@ import org.apache.hadoop.yarn.api.records.ContainerReport;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.ReservationDefinition;
 import org.apache.hadoop.yarn.api.records.ReservationId;
@@ -458,9 +461,9 @@ public class TestYarnClient {
     client.start();
 
     // Get labels to nodes mapping
-    Map<String, Set<NodeId>> expectedLabelsToNodes =
+    Map<NodeLabel, Set<NodeId>> expectedLabelsToNodes =
         ((MockYarnClient)client).getLabelsToNodesMap();
-    Map<String, Set<NodeId>> labelsToNodes = client.getLabelsToNodes();
+    Map<NodeLabel, Set<NodeId>> labelsToNodes = client.getLabelsToNodes();
     Assert.assertEquals(labelsToNodes, expectedLabelsToNodes);
     Assert.assertEquals(labelsToNodes.size(), 3);
 
@@ -476,7 +479,32 @@ public class TestYarnClient {
     client.close();
   }
 
+  @Test (timeout = 10000)
+  public void testGetNodesToLabels() throws YarnException, IOException {
+    Configuration conf = new Configuration();
+    final YarnClient client = new MockYarnClient();
+    client.init(conf);
+    client.start();
+
+    // Get labels to nodes mapping
+    Map<NodeId, Set<NodeLabel>> expectedNodesToLabels = ((MockYarnClient) client)
+        .getNodeToLabelsMap();
+    Map<NodeId, Set<NodeLabel>> nodesToLabels = client.getNodeToLabels();
+    Assert.assertEquals(nodesToLabels, expectedNodesToLabels);
+    Assert.assertEquals(nodesToLabels.size(), 1);
+
+    // Verify exclusivity
+    Set<NodeLabel> labels = nodesToLabels.get(NodeId.newInstance("host", 0));
+    for (NodeLabel label : labels) {
+      Assert.assertFalse(label.isExclusive());
+    }
+
+    client.stop();
+    client.close();
+  }
+
   private static class MockYarnClient extends YarnClientImpl {
+
     private ApplicationReport mockReport;
     private List<ApplicationReport> reports;
     private HashMap<ApplicationId, List<ApplicationAttemptReport>> attempts = 
@@ -498,6 +526,8 @@ public class TestYarnClient {
       mock(GetContainerReportResponse.class);
     GetLabelsToNodesResponse mockLabelsToNodesResponse =
       mock(GetLabelsToNodesResponse.class);
+    GetNodesToLabelsResponse mockNodeToLabelsResponse =
+        mock(GetNodesToLabelsResponse.class);
 
     public MockYarnClient() {
       super();
@@ -537,6 +567,9 @@ public class TestYarnClient {
         when(rmClient.getLabelsToNodes(any(GetLabelsToNodesRequest.class)))
             .thenReturn(mockLabelsToNodesResponse);
         
+        when(rmClient.getNodeToLabels(any(GetNodesToLabelsRequest.class)))
+            .thenReturn(mockNodeToLabelsResponse);
+
         historyClient = mock(AHSClient.class);
         
       } catch (YarnException e) {
@@ -704,7 +737,7 @@ public class TestYarnClient {
     }
 
     @Override
-    public Map<String, Set<NodeId>> getLabelsToNodes()
+    public Map<NodeLabel, Set<NodeId>> getLabelsToNodes()
         throws YarnException, IOException {
       when(mockLabelsToNodesResponse.getLabelsToNodes()).thenReturn(
           getLabelsToNodesMap());
@@ -712,36 +745,53 @@ public class TestYarnClient {
     }
 
     @Override
-    public Map<String, Set<NodeId>> getLabelsToNodes(Set<String> labels)
+    public Map<NodeLabel, Set<NodeId>> getLabelsToNodes(Set<String> labels)
         throws YarnException, IOException {
       when(mockLabelsToNodesResponse.getLabelsToNodes()).thenReturn(
           getLabelsToNodesMap(labels));
       return super.getLabelsToNodes(labels);
     }
 
-    public Map<String, Set<NodeId>> getLabelsToNodesMap() {
-      Map<String, Set<NodeId>> map = new HashMap<String, Set<NodeId>>();
+    public Map<NodeLabel, Set<NodeId>> getLabelsToNodesMap() {
+      Map<NodeLabel, Set<NodeId>> map = new HashMap<NodeLabel, Set<NodeId>>();
       Set<NodeId> setNodeIds =
           new HashSet<NodeId>(Arrays.asList(
           NodeId.newInstance("host1", 0), NodeId.newInstance("host2", 0)));
-      map.put("x", setNodeIds);
-      map.put("y", setNodeIds);
-      map.put("z", setNodeIds);
+      map.put(NodeLabel.newInstance("x"), setNodeIds);
+      map.put(NodeLabel.newInstance("y"), setNodeIds);
+      map.put(NodeLabel.newInstance("z"), setNodeIds);
       return map;
     }
 
-    public Map<String, Set<NodeId>> getLabelsToNodesMap(Set<String> labels) {
-      Map<String, Set<NodeId>> map = new HashMap<String, Set<NodeId>>();
+    public Map<NodeLabel, Set<NodeId>> getLabelsToNodesMap(Set<String> labels) {
+      Map<NodeLabel, Set<NodeId>> map = new HashMap<NodeLabel, Set<NodeId>>();
       Set<NodeId> setNodeIds =
           new HashSet<NodeId>(Arrays.asList(
           NodeId.newInstance("host1", 0), NodeId.newInstance("host2", 0)));
       for(String label : labels) {
-        map.put(label, setNodeIds);
+        map.put(NodeLabel.newInstance(label), setNodeIds);
       }
       return map;
     }
 
     @Override
+    public Map<NodeId, Set<NodeLabel>> getNodeToLabels() throws YarnException,
+        IOException {
+      when(mockNodeToLabelsResponse.getNodeToLabels()).thenReturn(
+          getNodeToLabelsMap());
+      return super.getNodeToLabels();
+    }
+
+    public Map<NodeId, Set<NodeLabel>> getNodeToLabelsMap() {
+      Map<NodeId, Set<NodeLabel>> map = new HashMap<NodeId, Set<NodeLabel>>();
+      Set<NodeLabel> setNodeLabels = new HashSet<NodeLabel>(Arrays.asList(
+          NodeLabel.newInstance("x", false),
+          NodeLabel.newInstance("y", false)));
+      map.put(NodeId.newInstance("host", 0), setNodeLabels);
+      return map;
+    }
+
+    @Override
     public List<ApplicationAttemptReport> getApplicationAttempts(
         ApplicationId appId) throws YarnException, IOException {
       when(mockAppAttemptsResponse.getApplicationAttemptList()).thenReturn(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/563eb1ad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetLabelsToNodesResponsePBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetLabelsToNodesResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetLabelsToNodesResponsePBImpl.java
index e197997..418fcbd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetLabelsToNodesResponsePBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetLabelsToNodesResponsePBImpl.java
@@ -29,11 +29,13 @@ import java.util.Set;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.NodeLabelPBImpl;
 import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
 import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesResponse;
-
 import org.apache.hadoop.yarn.proto.YarnProtos.LabelsToNodeIdsProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeLabelProto;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetLabelsToNodesResponseProto;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetLabelsToNodesResponseProtoOrBuilder;
 
@@ -44,7 +46,7 @@ public class GetLabelsToNodesResponsePBImpl extends
   GetLabelsToNodesResponseProto.Builder builder = null;
   boolean viaProto = false;
 
-  private Map<String, Set<NodeId>> labelsToNodes;
+  private Map<NodeLabel, Set<NodeId>> labelsToNodes;
 
   public GetLabelsToNodesResponsePBImpl() {
     this.builder = GetLabelsToNodesResponseProto.newBuilder();
@@ -61,7 +63,7 @@ public class GetLabelsToNodesResponsePBImpl extends
     }
     GetLabelsToNodesResponseProtoOrBuilder p = viaProto ? proto : builder;
     List<LabelsToNodeIdsProto> list = p.getLabelsToNodesList();
-    this.labelsToNodes = new HashMap<String, Set<NodeId>>();
+    this.labelsToNodes = new HashMap<NodeLabel, Set<NodeId>>();
 
     for (LabelsToNodeIdsProto c : list) {
       Set<NodeId> setNodes = new HashSet<NodeId>();
@@ -69,8 +71,9 @@ public class GetLabelsToNodesResponsePBImpl extends
         NodeId node = new NodeIdPBImpl(n);
         setNodes.add(node);
       }
-      if(!setNodes.isEmpty()) {
-        this.labelsToNodes.put(c.getNodeLabels(), setNodes);
+      if (!setNodes.isEmpty()) {
+        this.labelsToNodes
+            .put(new NodeLabelPBImpl(c.getNodeLabels()), setNodes);
       }
     }
   }
@@ -94,7 +97,7 @@ public class GetLabelsToNodesResponsePBImpl extends
           public Iterator<LabelsToNodeIdsProto> iterator() {
             return new Iterator<LabelsToNodeIdsProto>() {
 
-              Iterator<Entry<String, Set<NodeId>>> iter =
+              Iterator<Entry<NodeLabel, Set<NodeId>>> iter =
                   labelsToNodes.entrySet().iterator();
 
               @Override
@@ -104,13 +107,14 @@ public class GetLabelsToNodesResponsePBImpl extends
 
               @Override
               public LabelsToNodeIdsProto next() {
-                Entry<String, Set<NodeId>> now = iter.next();
+                Entry<NodeLabel, Set<NodeId>> now = iter.next();
                 Set<NodeIdProto> nodeProtoSet = new HashSet<NodeIdProto>();
                 for(NodeId n : now.getValue()) {
                   nodeProtoSet.add(convertToProtoFormat(n));
                 }
                 return LabelsToNodeIdsProto.newBuilder()
-                    .setNodeLabels(now.getKey()).addAllNodeId(nodeProtoSet)
+                    .setNodeLabels(convertToProtoFormat(now.getKey()))
+                    .addAllNodeId(nodeProtoSet)
                     .build();
               }
 
@@ -149,6 +153,10 @@ public class GetLabelsToNodesResponsePBImpl extends
     return ((NodeIdPBImpl)t).getProto();
   }
 
+  private NodeLabelProto convertToProtoFormat(NodeLabel l) {
+    return ((NodeLabelPBImpl)l).getProto();
+  }
+
   @Override
   public int hashCode() {
     assert false : "hashCode not designed";
@@ -168,7 +176,7 @@ public class GetLabelsToNodesResponsePBImpl extends
   @Override
   @Public
   @Evolving
-  public void setLabelsToNodes(Map<String, Set<NodeId>> map) {
+  public void setLabelsToNodes(Map<NodeLabel, Set<NodeId>> map) {
     initLabelsToNodes();
     labelsToNodes.clear();
     labelsToNodes.putAll(map);
@@ -177,7 +185,7 @@ public class GetLabelsToNodesResponsePBImpl extends
   @Override
   @Public
   @Evolving
-  public Map<String, Set<NodeId>> getLabelsToNodes() {
+  public Map<NodeLabel, Set<NodeId>> getLabelsToNodes() {
     initLabelsToNodes();
     return this.labelsToNodes;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/563eb1ad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNodesToLabelsResponsePBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNodesToLabelsResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNodesToLabelsResponsePBImpl.java
index 3404830..52be73f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNodesToLabelsResponsePBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNodesToLabelsResponsePBImpl.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
 
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -26,12 +27,13 @@ import java.util.Map.Entry;
 import java.util.Set;
 
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.NodeLabelPBImpl;
 import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
 import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsResponse;
-
-import com.google.common.collect.Sets;
-import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdToLabelsProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdToLabelsInfoProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeLabelProto;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNodesToLabelsResponseProto;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNodesToLabelsResponseProtoOrBuilder;
 
@@ -42,8 +44,8 @@ public class GetNodesToLabelsResponsePBImpl extends
   GetNodesToLabelsResponseProto.Builder builder = null;
   boolean viaProto = false;
 
-  private Map<NodeId, Set<String>> nodeToLabels;
-  
+  private Map<NodeId, Set<NodeLabel>> nodeToLabels;
+
   public GetNodesToLabelsResponsePBImpl() {
     this.builder = GetNodesToLabelsResponseProto.newBuilder();
   }
@@ -58,12 +60,15 @@ public class GetNodesToLabelsResponsePBImpl extends
       return;
     }
     GetNodesToLabelsResponseProtoOrBuilder p = viaProto ? proto : builder;
-    List<NodeIdToLabelsProto> list = p.getNodeToLabelsList();
-    this.nodeToLabels = new HashMap<NodeId, Set<String>>();
-
-    for (NodeIdToLabelsProto c : list) {
-      this.nodeToLabels.put(new NodeIdPBImpl(c.getNodeId()),
-          Sets.newHashSet(c.getNodeLabelsList()));
+    List<NodeIdToLabelsInfoProto> list = p.getNodeToLabelsList();
+    this.nodeToLabels = new HashMap<NodeId, Set<NodeLabel>>();
+
+    for (NodeIdToLabelsInfoProto c : list) {
+      Set<NodeLabel> labels = new HashSet<NodeLabel>();
+      for (NodeLabelProto l : c.getNodeLabelsList()) {
+        labels.add(new NodeLabelPBImpl(l));
+      }
+      this.nodeToLabels.put(new NodeIdPBImpl(c.getNodeId()), labels);
     }
   }
 
@@ -80,13 +85,13 @@ public class GetNodesToLabelsResponsePBImpl extends
     if (nodeToLabels == null) {
       return;
     }
-    Iterable<NodeIdToLabelsProto> iterable =
-        new Iterable<NodeIdToLabelsProto>() {
+    Iterable<NodeIdToLabelsInfoProto> iterable =
+        new Iterable<NodeIdToLabelsInfoProto>() {
           @Override
-          public Iterator<NodeIdToLabelsProto> iterator() {
-            return new Iterator<NodeIdToLabelsProto>() {
+          public Iterator<NodeIdToLabelsInfoProto> iterator() {
+            return new Iterator<NodeIdToLabelsInfoProto>() {
 
-              Iterator<Entry<NodeId, Set<String>>> iter = nodeToLabels
+              Iterator<Entry<NodeId, Set<NodeLabel>>> iter = nodeToLabels
                   .entrySet().iterator();
 
               @Override
@@ -95,11 +100,16 @@ public class GetNodesToLabelsResponsePBImpl extends
               }
 
               @Override
-              public NodeIdToLabelsProto next() {
-                Entry<NodeId, Set<String>> now = iter.next();
-                return NodeIdToLabelsProto.newBuilder()
+              public NodeIdToLabelsInfoProto next() {
+                Entry<NodeId, Set<NodeLabel>> now = iter.next();
+                Set<NodeLabelProto> labelProtoList =
+                    new HashSet<NodeLabelProto>();
+                for (NodeLabel l : now.getValue()) {
+                  labelProtoList.add(convertToProtoFormat(l));
+                }
+                return NodeIdToLabelsInfoProto.newBuilder()
                     .setNodeId(convertToProtoFormat(now.getKey()))
-                    .addAllNodeLabels(now.getValue()).build();
+                    .addAllNodeLabels(labelProtoList).build();
               }
 
               @Override
@@ -134,13 +144,13 @@ public class GetNodesToLabelsResponsePBImpl extends
   }
 
   @Override
-  public Map<NodeId, Set<String>> getNodeToLabels() {
+  public Map<NodeId, Set<NodeLabel>> getNodeToLabels() {
     initNodeToLabels();
     return this.nodeToLabels;
   }
 
   @Override
-  public void setNodeToLabels(Map<NodeId, Set<String>> map) {
+  public void setNodeToLabels(Map<NodeId, Set<NodeLabel>> map) {
     initNodeToLabels();
     nodeToLabels.clear();
     nodeToLabels.putAll(map);
@@ -150,6 +160,10 @@ public class GetNodesToLabelsResponsePBImpl extends
     return ((NodeIdPBImpl)t).getProto();
   }
   
+  private NodeLabelProto convertToProtoFormat(NodeLabel t) {
+    return ((NodeLabelPBImpl)t).getProto();
+  }
+
   @Override
   public int hashCode() {
     assert false : "hashCode not designed";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/563eb1ad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReplaceLabelsOnNodeRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReplaceLabelsOnNodeRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReplaceLabelsOnNodeRequestPBImpl.java
index e296aaf..22e561c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReplaceLabelsOnNodeRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReplaceLabelsOnNodeRequestPBImpl.java
@@ -28,7 +28,7 @@ import java.util.Set;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl;
 import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
-import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdToLabelsProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.NodeIdToLabelsNameProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeRequestProtoOrBuilder;
 import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequest;
@@ -58,10 +58,10 @@ public class ReplaceLabelsOnNodeRequestPBImpl extends
       return;
     }
     ReplaceLabelsOnNodeRequestProtoOrBuilder p = viaProto ? proto : builder;
-    List<NodeIdToLabelsProto> list = p.getNodeToLabelsList();
+    List<NodeIdToLabelsNameProto> list = p.getNodeToLabelsList();
     this.nodeIdToLabels = new HashMap<NodeId, Set<String>>();
 
-    for (NodeIdToLabelsProto c : list) {
+    for (NodeIdToLabelsNameProto c : list) {
       this.nodeIdToLabels.put(new NodeIdPBImpl(c.getNodeId()),
           Sets.newHashSet(c.getNodeLabelsList()));
     }
@@ -80,11 +80,11 @@ public class ReplaceLabelsOnNodeRequestPBImpl extends
     if (nodeIdToLabels == null) {
       return;
     }
-    Iterable<NodeIdToLabelsProto> iterable =
-        new Iterable<NodeIdToLabelsProto>() {
+    Iterable<NodeIdToLabelsNameProto> iterable =
+        new Iterable<NodeIdToLabelsNameProto>() {
           @Override
-          public Iterator<NodeIdToLabelsProto> iterator() {
-            return new Iterator<NodeIdToLabelsProto>() {
+          public Iterator<NodeIdToLabelsNameProto> iterator() {
+            return new Iterator<NodeIdToLabelsNameProto>() {
 
               Iterator<Entry<NodeId, Set<String>>> iter = nodeIdToLabels
                   .entrySet().iterator();
@@ -95,9 +95,9 @@ public class ReplaceLabelsOnNodeRequestPBImpl extends
               }
 
               @Override
-              public NodeIdToLabelsProto next() {
+              public NodeIdToLabelsNameProto next() {
                 Entry<NodeId, Set<String>> now = iter.next();
-                return NodeIdToLabelsProto.newBuilder()
+                return NodeIdToLabelsNameProto.newBuilder()
                     .setNodeId(convertToProtoFormat(now.getKey())).clearNodeLabels()
                     .addAllNodeLabels(now.getValue()).build();
               }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/563eb1ad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index 428b9eb..e4199be 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -1227,7 +1227,7 @@ public class ClientRMService extends AbstractService implements
       GetNodesToLabelsRequest request) throws YarnException, IOException {
     RMNodeLabelsManager labelsMgr = rmContext.getNodeLabelManager();
     GetNodesToLabelsResponse response =
-        GetNodesToLabelsResponse.newInstance(labelsMgr.getNodeLabels());
+        GetNodesToLabelsResponse.newInstance(labelsMgr.getNodeLabelsInfo());
     return response;
   }
 
@@ -1237,10 +1237,10 @@ public class ClientRMService extends AbstractService implements
     RMNodeLabelsManager labelsMgr = rmContext.getNodeLabelManager();
     if (request.getNodeLabels() == null || request.getNodeLabels().isEmpty()) {
       return GetLabelsToNodesResponse.newInstance(
-          labelsMgr.getLabelsToNodes());
+          labelsMgr.getLabelsInfoToNodes());
     } else {
       return GetLabelsToNodesResponse.newInstance(
-          labelsMgr.getLabelsToNodes(request.getNodeLabels()));
+          labelsMgr.getLabelsInfoToNodes(request.getNodeLabels()));
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/563eb1ad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
index a39f94f..20343a5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
@@ -1407,8 +1407,10 @@ public class TestClientRMService {
       };
     };
     rm.start();
+    NodeLabel labelX = NodeLabel.newInstance("x", false);
+    NodeLabel labelY = NodeLabel.newInstance("y");
     RMNodeLabelsManager labelsMgr = rm.getRMContext().getNodeLabelManager();
-    labelsMgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y"));
+    labelsMgr.addToCluserNodeLabels(ImmutableSet.of(labelX, labelY));
 
     NodeId node1 = NodeId.newInstance("host1", 1234);
     NodeId node2 = NodeId.newInstance("host2", 1234);
@@ -1422,25 +1424,37 @@ public class TestClientRMService {
     YarnRPC rpc = YarnRPC.create(conf);
     InetSocketAddress rmAddress = rm.getClientRMService().getBindAddress();
     LOG.info("Connecting to ResourceManager at " + rmAddress);
-    ApplicationClientProtocol client =
-        (ApplicationClientProtocol) rpc.getProxy(
-            ApplicationClientProtocol.class, rmAddress, conf);
+    ApplicationClientProtocol client = (ApplicationClientProtocol) rpc
+        .getProxy(ApplicationClientProtocol.class, rmAddress, conf);
 
     // Get node labels collection
-    GetClusterNodeLabelsResponse response =
-        client.getClusterNodeLabels(GetClusterNodeLabelsRequest.newInstance());
+    GetClusterNodeLabelsResponse response = client
+        .getClusterNodeLabels(GetClusterNodeLabelsRequest.newInstance());
     Assert.assertTrue(response.getNodeLabels().containsAll(
-        Arrays.asList(NodeLabel.newInstance("x"), NodeLabel.newInstance("y"))));
+        Arrays.asList(labelX, labelY)));
 
     // Get node labels mapping
-    GetNodesToLabelsResponse response1 =
-        client.getNodeToLabels(GetNodesToLabelsRequest.newInstance());
-    Map<NodeId, Set<String>> nodeToLabels = response1.getNodeToLabels();
+    GetNodesToLabelsResponse response1 = client
+        .getNodeToLabels(GetNodesToLabelsRequest.newInstance());
+    Map<NodeId, Set<NodeLabel>> nodeToLabels = response1.getNodeToLabels();
     Assert.assertTrue(nodeToLabels.keySet().containsAll(
         Arrays.asList(node1, node2)));
-    Assert.assertTrue(nodeToLabels.get(node1).containsAll(Arrays.asList("x")));
-    Assert.assertTrue(nodeToLabels.get(node2).containsAll(Arrays.asList("y")));
-    
+    Assert.assertTrue(nodeToLabels.get(node1)
+        .containsAll(Arrays.asList(labelX)));
+    Assert.assertTrue(nodeToLabels.get(node2)
+        .containsAll(Arrays.asList(labelY)));
+    // Verify whether labelX's exclusivity is false
+    for (NodeLabel x : nodeToLabels.get(node1)) {
+      Assert.assertFalse(x.isExclusive());
+    }
+    // Verify whether labelY's exclusivity is true
+    for (NodeLabel y : nodeToLabels.get(node2)) {
+      Assert.assertTrue(y.isExclusive());
+    }
+    // Below label "x" is not present in the response as exclusivity is true
+    Assert.assertFalse(nodeToLabels.get(node1).containsAll(
+        Arrays.asList(NodeLabel.newInstance("x"))));
+
     rpc.stopProxy(client, conf);
     rm.close();
   }
@@ -1456,8 +1470,12 @@ public class TestClientRMService {
       };
     };
     rm.start();
+
+    NodeLabel labelX = NodeLabel.newInstance("x", false);
+    NodeLabel labelY = NodeLabel.newInstance("y", false);
+    NodeLabel labelZ = NodeLabel.newInstance("z", false);
     RMNodeLabelsManager labelsMgr = rm.getRMContext().getNodeLabelManager();
-    labelsMgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y", "z"));
+    labelsMgr.addToCluserNodeLabels(ImmutableSet.of(labelX, labelY, labelZ));
 
     NodeId node1A = NodeId.newInstance("host1", 1234);
     NodeId node1B = NodeId.newInstance("host1", 5678);
@@ -1477,43 +1495,49 @@ public class TestClientRMService {
     YarnRPC rpc = YarnRPC.create(conf);
     InetSocketAddress rmAddress = rm.getClientRMService().getBindAddress();
     LOG.info("Connecting to ResourceManager at " + rmAddress);
-    ApplicationClientProtocol client =
-        (ApplicationClientProtocol) rpc.getProxy(
-            ApplicationClientProtocol.class, rmAddress, conf);
+    ApplicationClientProtocol client = (ApplicationClientProtocol) rpc
+        .getProxy(ApplicationClientProtocol.class, rmAddress, conf);
 
     // Get node labels collection
-    GetClusterNodeLabelsResponse response =
-        client.getClusterNodeLabels(GetClusterNodeLabelsRequest.newInstance());
+    GetClusterNodeLabelsResponse response = client
+        .getClusterNodeLabels(GetClusterNodeLabelsRequest.newInstance());
     Assert.assertTrue(response.getNodeLabels().containsAll(
-        Arrays.asList(NodeLabel.newInstance("x"), NodeLabel.newInstance("y"),
-            NodeLabel.newInstance("z"))));
+        Arrays.asList(labelX, labelY, labelZ)));
 
     // Get labels to nodes mapping
-    GetLabelsToNodesResponse response1 =
-        client.getLabelsToNodes(GetLabelsToNodesRequest.newInstance());
-    Map<String, Set<NodeId>> labelsToNodes = response1.getLabelsToNodes();
-    Assert.assertTrue(
-        labelsToNodes.keySet().containsAll(Arrays.asList("x", "y", "z")));
-    Assert.assertTrue(
-        labelsToNodes.get("x").containsAll(Arrays.asList(node1A)));
-    Assert.assertTrue(
-        labelsToNodes.get("y").containsAll(Arrays.asList(node2A, node3A)));
-    Assert.assertTrue(
-        labelsToNodes.get("z").containsAll(Arrays.asList(node1B, node3B)));
+    GetLabelsToNodesResponse response1 = client
+        .getLabelsToNodes(GetLabelsToNodesRequest.newInstance());
+    Map<NodeLabel, Set<NodeId>> labelsToNodes = response1.getLabelsToNodes();
+    // Verify whether all NodeLabel's exclusivity are false
+    for (Map.Entry<NodeLabel, Set<NodeId>> nltn : labelsToNodes.entrySet()) {
+      Assert.assertFalse(nltn.getKey().isExclusive());
+    }
+    Assert.assertTrue(labelsToNodes.keySet().containsAll(
+        Arrays.asList(labelX, labelY, labelZ)));
+    Assert.assertTrue(labelsToNodes.get(labelX).containsAll(
+        Arrays.asList(node1A)));
+    Assert.assertTrue(labelsToNodes.get(labelY).containsAll(
+        Arrays.asList(node2A, node3A)));
+    Assert.assertTrue(labelsToNodes.get(labelZ).containsAll(
+        Arrays.asList(node1B, node3B)));
 
     // Get labels to nodes mapping for specific labels
-    Set<String> setlabels =
-        new HashSet<String>(Arrays.asList(new String[]{"x", "z"}));
-    GetLabelsToNodesResponse response2 =
-        client.getLabelsToNodes(GetLabelsToNodesRequest.newInstance(setlabels));
+    Set<String> setlabels = new HashSet<String>(Arrays.asList(new String[]{"x",
+        "z"}));
+    GetLabelsToNodesResponse response2 = client
+        .getLabelsToNodes(GetLabelsToNodesRequest.newInstance(setlabels));
     labelsToNodes = response2.getLabelsToNodes();
-    Assert.assertTrue(
-        labelsToNodes.keySet().containsAll(Arrays.asList("x", "z")));
-    Assert.assertTrue(
-        labelsToNodes.get("x").containsAll(Arrays.asList(node1A)));
-    Assert.assertTrue(
-        labelsToNodes.get("z").containsAll(Arrays.asList(node1B, node3B)));
-    Assert.assertEquals(labelsToNodes.get("y"), null);
+    // Verify whether all NodeLabel's exclusivity are false
+    for (Map.Entry<NodeLabel, Set<NodeId>> nltn : labelsToNodes.entrySet()) {
+      Assert.assertFalse(nltn.getKey().isExclusive());
+    }
+    Assert.assertTrue(labelsToNodes.keySet().containsAll(
+        Arrays.asList(labelX, labelZ)));
+    Assert.assertTrue(labelsToNodes.get(labelX).containsAll(
+        Arrays.asList(node1A)));
+    Assert.assertTrue(labelsToNodes.get(labelZ).containsAll(
+        Arrays.asList(node1B, node3B)));
+    Assert.assertEquals(labelsToNodes.get(labelY), null);
 
     rpc.stopProxy(client, conf);
     rm.close();
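
As a usage illustration of the API change above, the following is a minimal, hypothetical client sketch (the class name and the bare YarnConfiguration are assumptions, not part of the patch) showing what callers gain from the NodeLabel-typed mapping: label attributes such as exclusivity now travel with the label name instead of being flattened to a plain String.

  import java.util.Map;
  import java.util.Set;

  import org.apache.hadoop.yarn.api.records.NodeId;
  import org.apache.hadoop.yarn.api.records.NodeLabel;
  import org.apache.hadoop.yarn.client.api.YarnClient;
  import org.apache.hadoop.yarn.conf.YarnConfiguration;

  // Hypothetical sketch of a caller consuming the NodeLabel-typed mapping.
  public class NodeLabelQueryExample {
    public static void main(String[] args) throws Exception {
      YarnClient client = YarnClient.createYarnClient();
      client.init(new YarnConfiguration());
      client.start();
      try {
        // getNodeToLabels() now returns NodeLabel objects, so exclusivity
        // is available directly instead of needing a second lookup by name.
        Map<NodeId, Set<NodeLabel>> nodeToLabels = client.getNodeToLabels();
        for (Map.Entry<NodeId, Set<NodeLabel>> entry : nodeToLabels.entrySet()) {
          for (NodeLabel label : entry.getValue()) {
            System.out.println(entry.getKey() + " -> " + label.getName()
                + " (exclusive=" + label.isExclusive() + ")");
          }
        }
      } finally {
        client.stop();
      }
    }
  }

The same pattern applies to getLabelsToNodes(), whose keys are now NodeLabel instances as well.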


[17/50] [abbrv] hadoop git commit: Updating CHANGES.txt for moving entry of HDFS-8332 from branch-2 to trunk

Posted by ji...@apache.org.
Updating CHANGES.txt for moving entry of HDFS-8332 from branch-2 to trunk


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/363c3554
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/363c3554
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/363c3554

Branch: refs/heads/HDFS-7240
Commit: 363c35541d4f9da4974f3e346cb397796173824c
Parents: a46506d
Author: Uma Maheswara Rao G <um...@apache.org>
Authored: Mon May 18 14:48:49 2015 +0530
Committer: Uma Maheswara Rao G <um...@apache.org>
Committed: Mon May 18 14:48:49 2015 +0530

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/363c3554/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8c823ef..3e0d360 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -330,6 +330,8 @@ Trunk (Unreleased)
     HDFS-7673. synthetic load generator docs give incorrect/incomplete commands
     (Brahma Reddy Battula via aw)
 
+    HDFS-8332. DFS client API calls should check filesystem closed (Rakesh R via umamahesh)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -711,8 +713,6 @@ Release 2.8.0 - UNRELEASED
     HDFS-6291. FSImage may be left unclosed in BootstrapStandby#doRun()
     (Sanghyun Yun via vinayakumarb)
 
-    HDFS-8332. DFS client API calls should check filesystem closed (Rakesh R via umamahesh)
-
     HDFS-7998. HDFS Federation : Command mentioned to add a NN to existing
     federated cluster is wrong (Ajith S via vinayakumarb)
 


[45/50] [abbrv] hadoop git commit: HADOOP-11995. Make jetty version configurable from the maven command line. Contributed by Sriharsha Devineni.

Posted by ji...@apache.org.
HADOOP-11995. Make jetty version configurable from the maven command line. Contributed by Sriharsha Devineni.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce53c8eb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce53c8eb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce53c8eb

Branch: refs/heads/HDFS-7240
Commit: ce53c8eb0ccc582957ba1f4c0b7938db00f6ca31
Parents: 7401e5b
Author: Haohui Mai <wh...@apache.org>
Authored: Tue May 19 18:28:20 2015 -0700
Committer: Haohui Mai <wh...@apache.org>
Committed: Tue May 19 18:28:20 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
 hadoop-project/pom.xml                          | 8 ++++----
 2 files changed, 7 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce53c8eb/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 3e7cb39..b0b8fb7 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -587,6 +587,9 @@ Release 2.8.0 - UNRELEASED
 
     HADOOP-11103. Clean up RemoteException (Sean Busbey via vinayakumarb)
 
+    HADOOP-11995. Make jetty version configurable from the maven command line.
+    (Sriharsha Devineni via wheat9)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce53c8eb/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index f23a2dd..78903fa 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -32,7 +32,7 @@
   <properties>
     <failIfNoTests>false</failIfNoTests>
     <maven.test.redirectTestOutputToFile>true</maven.test.redirectTestOutputToFile>
-
+    <jetty.version>6.1.26</jetty.version>
     <test.exclude>_</test.exclude>
     <test.exclude.pattern>_</test.exclude.pattern>
 
@@ -461,7 +461,7 @@
       <dependency>
         <groupId>org.mortbay.jetty</groupId>
         <artifactId>jetty</artifactId>
-        <version>6.1.26</version>
+        <version>${jetty.version}</version>
         <exclusions>
           <exclusion>
             <groupId>org.mortbay.jetty</groupId>
@@ -472,7 +472,7 @@
       <dependency>
         <groupId>org.mortbay.jetty</groupId>
         <artifactId>jetty-util</artifactId>
-        <version>6.1.26</version>
+        <version>${jetty.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.tomcat.embed</groupId>
@@ -591,7 +591,7 @@
       <dependency>
         <groupId>org.mortbay.jetty</groupId>
         <artifactId>jetty-servlet-tester</artifactId>
-        <version>6.1.26</version>
+        <version>${jetty.version}</version>
       </dependency>
       <dependency>
         <groupId>commons-logging</groupId>
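
In practice this means a build can be pointed at a different Jetty release without editing the pom, by overriding the property on the Maven command line, e.g. mvn install -Djetty.version=<alternate 6.1.x build> (the goal and placeholder are shown only for illustration); 6.1.26 remains the default declared in hadoop-project/pom.xml.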


[08/50] [abbrv] hadoop git commit: YARN-2421. RM still allocates containers to an app in the FINISHING state. Contributed by Chang Li

Posted by ji...@apache.org.
YARN-2421. RM still allocates containers to an app in the FINISHING state. Contributed by Chang Li


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f7e051c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f7e051c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f7e051c4

Branch: refs/heads/HDFS-7240
Commit: f7e051c4310024d4040ad466c34432c72e88b0fc
Parents: 03a293a
Author: Jason Lowe <jl...@apache.org>
Authored: Fri May 15 22:09:30 2015 +0000
Committer: Jason Lowe <jl...@apache.org>
Committed: Fri May 15 22:09:30 2015 +0000

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 ++
 .../ApplicationMasterService.java               | 22 +++++++++-
 .../TestApplicationMasterService.java           | 44 ++++++++++++++++++++
 3 files changed, 67 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7e051c4/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f2a518e..810152a 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -411,6 +411,9 @@ Release 2.8.0 - UNRELEASED
     YARN-1519. Check in container-executor if sysconf is implemented before
     using it (Radim Kolar and Eric Payne via raviprak)
 
+    YARN-2421. RM still allocates containers to an app in the FINISHING
+    state (Chang Li via jlowe)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7e051c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
index cd1dacf..ee6f6be 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
@@ -84,6 +84,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptRegistrationEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptStatusupdateEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptUnregistrationEvent;
@@ -95,6 +96,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.security.authorize.RMPolicyProvider;
 import org.apache.hadoop.yarn.server.security.MasterKeyData;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.apache.hadoop.yarn.util.resource.Resources;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -417,6 +419,11 @@ public class ApplicationMasterService extends AbstractService implements
     return hasApplicationMasterRegistered;
   }
 
+  protected final static List<Container> EMPTY_CONTAINER_LIST =
+      new ArrayList<Container>();
+  protected static final Allocation EMPTY_ALLOCATION = new Allocation(
+      EMPTY_CONTAINER_LIST, Resources.createResource(0), null, null, null);
+
   @Override
   public AllocateResponse allocate(AllocateRequest request)
       throws YarnException, IOException {
@@ -530,9 +537,20 @@ public class ApplicationMasterService extends AbstractService implements
       }
 
       // Send new requests to appAttempt.
-      Allocation allocation =
-          this.rScheduler.allocate(appAttemptId, ask, release, 
+      Allocation allocation;
+      RMAppAttemptState state =
+          app.getRMAppAttempt(appAttemptId).getAppAttemptState();
+      if (state.equals(RMAppAttemptState.FINAL_SAVING) ||
+          state.equals(RMAppAttemptState.FINISHING) ||
+          app.isAppFinalStateStored()) {
+        LOG.warn(appAttemptId + " is in " + state +
+                 " state, ignore container allocate request.");
+        allocation = EMPTY_ALLOCATION;
+      } else {
+        allocation =
+          this.rScheduler.allocate(appAttemptId, ask, release,
               blacklistAdditions, blacklistRemovals);
+      }
 
       if (!blacklistAdditions.isEmpty() || !blacklistRemovals.isEmpty()) {
         LOG.info("blacklist are updated in Scheduler." +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7e051c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
index 8c175b5..85d2515 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
@@ -34,6 +34,8 @@ import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest
 import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateRequestPBImpl;
 import org.apache.hadoop.yarn.api.records.*;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.DrainDispatcher;
 import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException;
 import org.apache.hadoop.yarn.exceptions.InvalidContainerReleaseException;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
@@ -310,4 +312,46 @@ public class TestApplicationMasterService {
       rm.stop();
     }
   }
+
+  @Test(timeout=1200000)
+  public void  testAllocateAfterUnregister() throws Exception {
+    MyResourceManager rm = new MyResourceManager(conf);
+    rm.start();
+    DrainDispatcher rmDispatcher = (DrainDispatcher) rm.getRMContext()
+            .getDispatcher();
+    // Register node1
+    MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB);
+
+    // Submit an application
+    RMApp app1 = rm.submitApp(2048);
+
+    nm1.nodeHeartbeat(true);
+    RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
+    MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
+    am1.registerAppAttempt();
+    // unregister app attempt
+    FinishApplicationMasterRequest req =
+        FinishApplicationMasterRequest.newInstance(
+           FinalApplicationStatus.KILLED, "", "");
+    am1.unregisterAppAttempt(req, false);
+    // request container after unregister
+    am1.addRequests(new String[] { "127.0.0.1" }, GB, 1, 1);
+    AllocateResponse alloc1Response = am1.schedule();
+
+    nm1.nodeHeartbeat(true);
+    rmDispatcher.await();
+    alloc1Response = am1.schedule();
+    Assert.assertEquals(0, alloc1Response.getAllocatedContainers().size());
+  }
+
+  private static class MyResourceManager extends MockRM {
+
+    public MyResourceManager(YarnConfiguration conf) {
+      super(conf);
+    }
+    @Override
+    protected Dispatcher createDispatcher() {
+      return new DrainDispatcher();
+    }
+  }
 }

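For readers skimming the YARN-2421 patch above: the core of the fix is a guard in ApplicationMasterService#allocate that returns an empty allocation once the app attempt has reached FINAL_SAVING or FINISHING, or its final state has already been stored. Below is a minimal, self-contained sketch of that guard pattern; the enum, the Allocation holder, and the scheduleContainers() stub are illustrative stand-ins, not the real YARN types.

    // Simplified illustration of the YARN-2421 guard above: once an attempt
    // is finishing or its final state is stored, return an empty allocation
    // instead of asking the scheduler for containers.
    import java.util.Collections;
    import java.util.List;

    public class AllocateGuardSketch {

      enum AttemptState { RUNNING, FINAL_SAVING, FINISHING, FINISHED }

      static final class Allocation {
        final List<String> containers;
        Allocation(List<String> containers) { this.containers = containers; }
      }

      static final Allocation EMPTY_ALLOCATION =
          new Allocation(Collections.<String>emptyList());

      // Stand-in for the scheduler call (rScheduler.allocate in the patch).
      static Allocation scheduleContainers() {
        return new Allocation(Collections.singletonList("container_1"));
      }

      static Allocation allocate(AttemptState state, boolean finalStateStored) {
        if (state == AttemptState.FINAL_SAVING
            || state == AttemptState.FINISHING
            || finalStateStored) {
          // Matches the patch: warn and hand back an empty allocation.
          System.out.println("Attempt is in " + state
              + " state, ignoring container allocate request.");
          return EMPTY_ALLOCATION;
        }
        return scheduleContainers();
      }

      public static void main(String[] args) {
        System.out.println(allocate(AttemptState.RUNNING, false).containers);   // [container_1]
        System.out.println(allocate(AttemptState.FINISHING, false).containers); // []
      }
    }

The accompanying testAllocateAfterUnregister test exercises exactly this path: after the AM unregisters, a subsequent allocate call must come back with zero allocated containers.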

[29/50] [abbrv] hadoop git commit: Move HADOOP-8934 in CHANGES.txt from 3.0.0 to 2.8.0.

Posted by ji...@apache.org.
Move HADOOP-8934 in CHANGES.txt from 3.0.0 to 2.8.0.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f889a492
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f889a492
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f889a492

Branch: refs/heads/HDFS-7240
Commit: f889a49271f368e8d37a156fb1c568f6d286e88a
Parents: 3b50dcd
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue May 19 18:01:24 2015 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue May 19 18:02:10 2015 +0900

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f889a492/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 8ce77b6..ee7d1e3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -38,9 +38,6 @@ Trunk (Unreleased)
 
     HADOOP-11485. Pluggable shell integration (aw)
 
-    HADOOP-8934. Shell command ls should include sort options (Jonathan Allen
-    via aw)
-
     HADOOP-11554. Expose HadoopKerberosName as a hadoop subcommand (aw)
 
     HADOOP-11565. Add --slaves shell option (aw)
@@ -488,6 +485,9 @@ Release 2.8.0 - UNRELEASED
 
     HADOOP-11949. Add user-provided plugins to test-patch (Sean Busbey via aw)
 
+    HADOOP-8934. Shell command ls should include sort options (Jonathan Allen
+    via aw)
+
     HADOOP-10971. Add -C flag to make `hadoop fs -ls` print filenames only.
     (Kengo Seki via aajisaka)
 


[25/50] [abbrv] hadoop git commit: YARN-3541. Add version info on timeline service / generic history web UI and REST API. Contributed by Zhijie Shen

Posted by ji...@apache.org.
YARN-3541. Add version info on timeline service / generic history web UI and REST API. Contributed by Zhijie Shen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76afd288
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76afd288
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76afd288

Branch: refs/heads/HDFS-7240
Commit: 76afd28862c1f27011273659a82cd45903a77170
Parents: cdfae44
Author: Xuan <xg...@apache.org>
Authored: Mon May 18 13:17:16 2015 -0700
Committer: Xuan <xg...@apache.org>
Committed: Mon May 18 13:17:16 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |   3 +
 .../api/records/timeline/TimelineAbout.java     | 116 +++++++++++++++++++
 .../yarn/util/timeline/TimelineUtils.java       |  14 +++
 .../webapp/AHSController.java                   |   4 +
 .../webapp/AHSWebApp.java                       |   1 +
 .../webapp/AHSWebServices.java                  |  12 ++
 .../webapp/AboutBlock.java                      |  47 ++++++++
 .../webapp/AboutPage.java                       |  36 ++++++
 .../webapp/NavBlock.java                        |   2 +
 .../timeline/webapp/TimelineWebServices.java    |  41 +------
 .../webapp/TestAHSWebApp.java                   |  14 +++
 .../webapp/TestAHSWebServices.java              |  31 +++++
 .../webapp/TestTimelineWebServices.java         |  25 +++-
 .../src/site/markdown/TimelineServer.md         | 101 +++++++++++++++-
 14 files changed, 404 insertions(+), 43 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76afd288/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 82174e7..c6f753d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -114,6 +114,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3505. Node's Log Aggregation Report with SUCCEED should not cached in 
     RMApps. (Xuan Gong via junping_du)
 
+    YARN-3541. Add version info on timeline service / generic history web UI
+    and REST API. (Zhijie Shen via xgong)
+
   IMPROVEMENTS
 
     YARN-644. Basic null check is not performed on passed in arguments before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76afd288/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineAbout.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineAbout.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineAbout.java
new file mode 100644
index 0000000..0a2625c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineAbout.java
@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records.timeline;
+
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+@XmlRootElement(name = "about")
+@XmlAccessorType(XmlAccessType.NONE)
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class TimelineAbout {
+
+  private String about;
+  private String timelineServiceVersion;
+  private String timelineServiceBuildVersion;
+  private String timelineServiceVersionBuiltOn;
+  private String hadoopVersion;
+  private String hadoopBuildVersion;
+  private String hadoopVersionBuiltOn;
+
+  public TimelineAbout() {
+  }
+
+  public TimelineAbout(String about) {
+    this.about = about;
+  }
+
+  @XmlElement(name = "About")
+  public String getAbout() {
+    return about;
+  }
+
+  public void setAbout(String about) {
+    this.about = about;
+  }
+
+  @XmlElement(name = "timeline-service-version")
+  public String getTimelineServiceVersion() {
+    return timelineServiceVersion;
+  }
+
+  public void setTimelineServiceVersion(String timelineServiceVersion) {
+    this.timelineServiceVersion = timelineServiceVersion;
+  }
+
+  @XmlElement(name = "timeline-service-build-version")
+  public String getTimelineServiceBuildVersion() {
+    return timelineServiceBuildVersion;
+  }
+
+  public void setTimelineServiceBuildVersion(
+      String timelineServiceBuildVersion) {
+    this.timelineServiceBuildVersion = timelineServiceBuildVersion;
+  }
+
+  @XmlElement(name = "timeline-service-version-built-on")
+  public String getTimelineServiceVersionBuiltOn() {
+    return timelineServiceVersionBuiltOn;
+  }
+
+  public void setTimelineServiceVersionBuiltOn(
+      String timelineServiceVersionBuiltOn) {
+    this.timelineServiceVersionBuiltOn = timelineServiceVersionBuiltOn;
+  }
+
+  @XmlElement(name = "hadoop-version")
+  public String getHadoopVersion() {
+    return hadoopVersion;
+  }
+
+  public void setHadoopVersion(String hadoopVersion) {
+    this.hadoopVersion = hadoopVersion;
+  }
+
+  @XmlElement(name = "hadoop-build-version")
+  public String getHadoopBuildVersion() {
+    return hadoopBuildVersion;
+  }
+
+  public void setHadoopBuildVersion(String hadoopBuildVersion) {
+    this.hadoopBuildVersion = hadoopBuildVersion;
+  }
+
+  @XmlElement(name = "hadoop-version-built-on")
+  public String getHadoopVersionBuiltOn() {
+    return hadoopVersionBuiltOn;
+  }
+
+  public void setHadoopVersionBuiltOn(String hadoopVersionBuiltOn) {
+    this.hadoopVersionBuiltOn = hadoopVersionBuiltOn;
+  }
+}
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76afd288/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/timeline/TimelineUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/timeline/TimelineUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/timeline/TimelineUtils.java
index 02b5eb4..4f838e6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/timeline/TimelineUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/timeline/TimelineUtils.java
@@ -26,7 +26,10 @@ import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.util.VersionInfo;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineAbout;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.util.YarnVersionInfo;
 import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
 import org.codehaus.jackson.JsonGenerationException;
 import org.codehaus.jackson.map.JsonMappingException;
@@ -83,6 +86,17 @@ public class TimelineUtils {
     }
   }
 
+  public static TimelineAbout createTimelineAbout(String about) {
+    TimelineAbout tsInfo = new TimelineAbout(about);
+    tsInfo.setHadoopBuildVersion(VersionInfo.getBuildVersion());
+    tsInfo.setHadoopVersion(VersionInfo.getVersion());
+    tsInfo.setHadoopVersionBuiltOn(VersionInfo.getDate());
+    tsInfo.setTimelineServiceBuildVersion(YarnVersionInfo.getBuildVersion());
+    tsInfo.setTimelineServiceVersion(YarnVersionInfo.getVersion());
+    tsInfo.setTimelineServiceVersionBuiltOn(YarnVersionInfo.getDate());
+    return tsInfo;
+  }
+
   public static InetSocketAddress getTimelineTokenServiceAddress(
       Configuration conf) {
     InetSocketAddress timelineServiceAddr = null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76afd288/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSController.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSController.java
index 4037f51..2e5e97c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSController.java
@@ -34,6 +34,10 @@ public class AHSController extends Controller {
     setTitle("Application History");
   }
 
+  public void about() {
+    render(AboutPage.class);
+  }
+
   public void app() {
     render(AppPage.class);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76afd288/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebApp.java
index 80c4550..0193cbd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebApp.java
@@ -56,6 +56,7 @@ public class AHSWebApp extends WebApp implements YarnWebParams {
     bind(ApplicationBaseProtocol.class).toInstance(historyClientService);
     bind(TimelineDataManager.class).toInstance(timelineDataManager);
     route("/", AHSController.class);
+    route("/about", AHSController.class, "about");
     route(pajoin("/apps", APP_STATE), AHSController.class);
     route(pajoin("/app", APPLICATION_ID), AHSController.class, "app");
     route(pajoin("/appattempt", APPLICATION_ATTEMPT_ID), AHSController.class,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76afd288/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
index 9edc9ab..e7a22bd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
@@ -34,6 +34,7 @@ import javax.ws.rs.core.MediaType;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineAbout;
 import org.apache.hadoop.yarn.server.webapp.WebServices;
 import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptsInfo;
@@ -41,6 +42,7 @@ import org.apache.hadoop.yarn.server.webapp.dao.AppInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.AppsInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo;
+import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 import org.apache.hadoop.yarn.webapp.BadRequestException;
 
 import com.google.inject.Inject;
@@ -56,6 +58,16 @@ public class AHSWebServices extends WebServices {
   }
 
   @GET
+  @Path("/about")
+  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  public TimelineAbout about(
+      @Context HttpServletRequest req,
+      @Context HttpServletResponse res) {
+    init(res);
+    return TimelineUtils.createTimelineAbout("Generic History Service API");
+  }
+
+  @GET
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public AppsInfo get(@Context HttpServletRequest req,
       @Context HttpServletResponse res) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76afd288/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AboutBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AboutBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AboutBlock.java
new file mode 100644
index 0000000..b2419e9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AboutBlock.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
+
+import com.google.inject.Inject;
+import org.apache.hadoop.util.VersionInfo;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineAbout;
+import org.apache.hadoop.yarn.util.YarnVersionInfo;
+import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
+import org.apache.hadoop.yarn.webapp.View;
+import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
+import org.apache.hadoop.yarn.webapp.view.InfoBlock;
+
+public class AboutBlock extends HtmlBlock {
+  @Inject
+  AboutBlock(View.ViewContext ctx) {
+    super(ctx);
+  }
+
+  @Override
+  protected void render(Block html) {
+    TimelineAbout tsInfo = TimelineUtils.createTimelineAbout(
+        "Timeline Server - Generic History Service UI");
+    info("Timeline Server Overview").
+        _("Timeline Server Version:", tsInfo.getTimelineServiceBuildVersion() +
+            " on " + tsInfo.getTimelineServiceVersionBuiltOn()).
+        _("Hadoop Version:", tsInfo.getHadoopBuildVersion() +
+            " on " + tsInfo.getHadoopVersionBuiltOn());
+    html._(InfoBlock.class);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76afd288/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AboutPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AboutPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AboutPage.java
new file mode 100644
index 0000000..b50073a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AboutPage.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
+
+
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.YarnWebParams;
+
+import static org.apache.hadoop.yarn.util.StringHelper.join;
+
+public class AboutPage extends AHSView {
+  @Override protected void preHead(Page.HTML<_> html) {
+    commonPreHead(html);
+    set(TITLE, "Timeline Server - Generic History Service");
+  }
+
+  @Override protected Class<? extends SubView> content() {
+    return AboutBlock.class;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76afd288/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
index 498503f..25ee4f0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
@@ -43,6 +43,8 @@ public class NavBlock extends HtmlBlock {
         div("#nav").
             h3("Application History").
                 ul().
+                    li().a(url("about"), "About").
+                    _().
                     li().a(url("apps"), "Applications").
                         ul().
                             li().a(url("apps",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76afd288/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java
index 915e3f2..90f4d39 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java
@@ -42,17 +42,12 @@ import javax.ws.rs.WebApplicationException;
 import javax.ws.rs.core.Context;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience.Public;
-import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.VersionInfo;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineDomains;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
@@ -65,6 +60,9 @@ import org.apache.hadoop.yarn.server.timeline.GenericObjectMapper;
 import org.apache.hadoop.yarn.server.timeline.NameValuePair;
 import org.apache.hadoop.yarn.server.timeline.TimelineDataManager;
 import org.apache.hadoop.yarn.server.timeline.TimelineReader.Field;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineAbout;
+import org.apache.hadoop.yarn.util.YarnVersionInfo;
+import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 import org.apache.hadoop.yarn.webapp.BadRequestException;
 import org.apache.hadoop.yarn.webapp.ForbiddenException;
 import org.apache.hadoop.yarn.webapp.NotFoundException;
@@ -86,43 +84,16 @@ public class TimelineWebServices {
     this.timelineDataManager = timelineDataManager;
   }
 
-  @XmlRootElement(name = "about")
-  @XmlAccessorType(XmlAccessType.NONE)
-  @Public
-  @Unstable
-  public static class AboutInfo {
-
-    private String about;
-
-    public AboutInfo() {
-
-    }
-
-    public AboutInfo(String about) {
-      this.about = about;
-    }
-
-    @XmlElement(name = "About")
-    public String getAbout() {
-      return about;
-    }
-
-    public void setAbout(String about) {
-      this.about = about;
-    }
-
-  }
-
   /**
    * Return the description of the timeline web services.
    */
   @GET
   @Produces({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
-  public AboutInfo about(
+  public TimelineAbout about(
       @Context HttpServletRequest req,
       @Context HttpServletResponse res) {
     init(res);
-    return new AboutInfo("Timeline API");
+    return TimelineUtils.createTimelineAbout("Timeline API");
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76afd288/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
index 2cd7580..1e0886f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebApp.java
@@ -88,6 +88,20 @@ public class TestAHSWebApp extends ApplicationHistoryStoreTestUtils {
   }
 
   @Test
+  public void testAboutPage() throws Exception {
+    Injector injector =
+        WebAppTests.createMockInjector(ApplicationBaseProtocol.class,
+            mockApplicationHistoryClientService(0, 0, 0));
+    AboutPage aboutPageInstance = injector.getInstance(AboutPage.class);
+
+    aboutPageInstance.render();
+    WebAppTests.flushOutput(injector);
+
+    aboutPageInstance.render();
+    WebAppTests.flushOutput(injector);
+  }
+
+  @Test
   public void testAppPage() throws Exception {
     Injector injector =
         WebAppTests.createMockInjector(ApplicationBaseProtocol.class,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76afd288/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java
index 913b80d..613df72 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java
@@ -49,6 +49,8 @@ import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 import org.apache.hadoop.yarn.server.timeline.TimelineDataManager;
 import org.apache.hadoop.yarn.server.timeline.TimelineStore;
 import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineAbout;
+import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
 import org.apache.hadoop.yarn.webapp.JerseyTestBase;
 import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
@@ -57,6 +59,7 @@ import org.codehaus.jettison.json.JSONArray;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
 import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -216,6 +219,34 @@ public class TestAHSWebServices extends JerseyTestBase {
   }
 
   @Test
+  public void testAbout() throws Exception {
+    WebResource r = resource();
+    ClientResponse response = r
+        .path("ws").path("v1").path("applicationhistory").path("about")
+        .queryParam("user.name", USERS[round])
+        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    TimelineAbout actualAbout = response.getEntity(TimelineAbout.class);
+    TimelineAbout expectedAbout =
+        TimelineUtils.createTimelineAbout("Generic History Service API");
+    Assert.assertNotNull(
+        "Timeline service about response is null", actualAbout);
+    Assert.assertEquals(expectedAbout.getAbout(), actualAbout.getAbout());
+    Assert.assertEquals(expectedAbout.getTimelineServiceVersion(),
+        actualAbout.getTimelineServiceVersion());
+    Assert.assertEquals(expectedAbout.getTimelineServiceBuildVersion(),
+        actualAbout.getTimelineServiceBuildVersion());
+    Assert.assertEquals(expectedAbout.getTimelineServiceVersionBuiltOn(),
+        actualAbout.getTimelineServiceVersionBuiltOn());
+    Assert.assertEquals(expectedAbout.getHadoopVersion(),
+        actualAbout.getHadoopVersion());
+    Assert.assertEquals(expectedAbout.getHadoopBuildVersion(),
+        actualAbout.getHadoopBuildVersion());
+    Assert.assertEquals(expectedAbout.getHadoopVersionBuiltOn(),
+        actualAbout.getHadoopVersionBuiltOn());
+  }
+
+  @Test
   public void testAppsQuery() throws Exception {
     WebResource r = resource();
     ClientResponse response =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76afd288/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestTimelineWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestTimelineWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestTimelineWebServices.java
index 7e96d2a..ab7cffd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestTimelineWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestTimelineWebServices.java
@@ -29,7 +29,6 @@ import java.util.Collections;
 import java.util.Enumeration;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
@@ -59,6 +58,8 @@ import org.apache.hadoop.yarn.server.timeline.TimelineDataManager;
 import org.apache.hadoop.yarn.server.timeline.TimelineStore;
 import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager;
 import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilter;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineAbout;
+import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
 import org.apache.hadoop.yarn.webapp.JerseyTestBase;
 import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
@@ -184,10 +185,24 @@ public class TestTimelineWebServices extends JerseyTestBase {
         .accept(MediaType.APPLICATION_JSON)
         .get(ClientResponse.class);
     assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
-    TimelineWebServices.AboutInfo about =
-        response.getEntity(TimelineWebServices.AboutInfo.class);
-    Assert.assertNotNull(about);
-    Assert.assertEquals("Timeline API", about.getAbout());
+    TimelineAbout actualAbout = response.getEntity(TimelineAbout.class);
+    TimelineAbout expectedAbout =
+        TimelineUtils.createTimelineAbout("Timeline API");
+    Assert.assertNotNull(
+        "Timeline service about response is null", actualAbout);
+    Assert.assertEquals(expectedAbout.getAbout(), actualAbout.getAbout());
+    Assert.assertEquals(expectedAbout.getTimelineServiceVersion(),
+        actualAbout.getTimelineServiceVersion());
+    Assert.assertEquals(expectedAbout.getTimelineServiceBuildVersion(),
+        actualAbout.getTimelineServiceBuildVersion());
+    Assert.assertEquals(expectedAbout.getTimelineServiceVersionBuiltOn(),
+        actualAbout.getTimelineServiceVersionBuiltOn());
+    Assert.assertEquals(expectedAbout.getHadoopVersion(),
+        actualAbout.getHadoopVersion());
+    Assert.assertEquals(expectedAbout.getHadoopBuildVersion(),
+        actualAbout.getHadoopBuildVersion());
+    Assert.assertEquals(expectedAbout.getHadoopVersionBuiltOn(),
+        actualAbout.getHadoopVersionBuiltOn());
   }
 
   private static void verifyEntities(TimelineEntities entities) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76afd288/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
index acdd8ff..e177215 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
@@ -358,9 +358,17 @@ Here is a non-normative description of the API.
 
     GET /ws/v1/timeline/
 
-Returns a JSON object describing the server instance.
+Returns a JSON object describing the server instance and version information.
 
-     {"About":"Timeline API"}
+     {
+       About: "Timeline API",
+       timeline-service-version: "3.0.0-SNAPSHOT",
+       timeline-service-build-version: "3.0.0-SNAPSHOT from fcd0702c10ce574b887280476aba63d6682d5271 by zshen source checksum e9ec74ea3ff7bc9f3d35e9cac694fb",
+       timeline-service-version-built-on: "2015-05-13T19:45Z",
+       hadoop-version: "3.0.0-SNAPSHOT",
+       hadoop-build-version: "3.0.0-SNAPSHOT from fcd0702c10ce574b887280476aba63d6682d5271 by zshen source checksum 95874b192923b43cdb96a6e483afd60",
+       hadoop-version-built-on: "2015-05-13T19:44Z"
+     }
 
 
 ## <a name="REST_API_DOMAINS"></a>Domains `/ws/v1/timeline/domain`
@@ -889,6 +897,92 @@ Response Body:
 Users can access the generic historic information of applications via REST
 APIs.
 
+## <a name="REST_API_ABOUT"></a>About
+
+With the about API, you can get a timeline about resource that contains the
+generic history REST API description and version information.
+
+It is essentially an XML/JSON-serialized form of the YARN `TimelineAbout`
+structure.
+
+### URI:
+
+Use the following URI to obtain a timeline about object.
+
+    http(s)://<timeline server http(s) address:port>/ws/v1/applicationhistory/about
+
+### HTTP Operations Supported:
+
+    GET
+
+### Query Parameters Supported:
+
+None
+
+### Elements of the `about` (Application) Object:
+
+| Item         | Data Type   | Description                   |
+|:---- |:----  |:---- |
+| `About` | string  | The description of the service |
+| `timeline-service-version` | string  | The timeline service version |
+| `timeline-service-build-version` | string  | The timeline service build version |
+| `timeline-service-version-built-on` | string  | When the timeline service was built |
+| `hadoop-version` | string  | Hadoop version |
+| `hadoop-build-version` | string  | Hadoop build version |
+| `hadoop-version-built-on` | string  | When Hadoop was built |
+
+### Response Examples:
+
+#### JSON response
+
+HTTP Request:
+
+    http://localhost:8188/ws/v1/applicationhistory/about
+
+Response Header:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+    Transfer-Encoding: chunked
+
+Response Body:
+
+    {
+      About: "Generic History Service API",
+      timeline-service-version: "3.0.0-SNAPSHOT",
+      timeline-service-build-version: "3.0.0-SNAPSHOT from fcd0702c10ce574b887280476aba63d6682d5271 by zshen source checksum e9ec74ea3ff7bc9f3d35e9cac694fb",
+      timeline-service-version-built-on: "2015-05-13T19:45Z",
+      hadoop-version: "3.0.0-SNAPSHOT",
+      hadoop-build-version: "3.0.0-SNAPSHOT from fcd0702c10ce574b887280476aba63d6682d5271 by zshen source checksum 95874b192923b43cdb96a6e483afd60",
+      hadoop-version-built-on: "2015-05-13T19:44Z"
+    }
+
+#### XML response
+
+HTTP Request:
+
+    GET http://localhost:8188/ws/v1/applicationhistory/about
+    Accept: application/xml
+
+Response Header:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/xml
+    Content-Length: 748
+
+Response Body:
+
+     <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+     <about>
+       <About>Generic History Service API</About>
+       <hadoop-build-version>3.0.0-SNAPSHOT from fcd0702c10ce574b887280476aba63d6682d5271 by zshen source checksum 95874b192923b43cdb96a6e483afd60</hadoop-build-version>
+       <hadoop-version>3.0.0-SNAPSHOT</hadoop-version>
+       <hadoop-version-built-on>2015-05-13T19:44Z</hadoop-version-built-on>
+       <timeline-service-build-version>3.0.0-SNAPSHOT from fcd0702c10ce574b887280476aba63d6682d5271 by zshen source checksum e9ec74ea3ff7bc9f3d35e9cac694fb</timeline-service-build-version>
+       <timeline-service-version>3.0.0-SNAPSHOT</timeline-service-version>
+       <timeline-service-version-built-on>2015-05-13T19:45Z</timeline-service-version-built-on>
+     </about>
+
 ## <a name="REST_API_LIST_APPLICATIONS"></a>Application List
 
 With the Application List API, you can obtain a collection of resources, each
@@ -1129,7 +1223,8 @@ With the Application API, you can get an application resource contains
 information about a particular application that was running on an YARN
 cluster.
 
-It is essentially a JSON-serialized form of the YARN `ApplicationReport` structure.
+It is essentially an XML/JSON-serialized form of the YARN `ApplicationReport`
+structure.
 
 ### URI:
 

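To try the new about endpoints described in the YARN-3541 patch above, a plain HTTP GET is enough. Below is a minimal Java client sketch against the generic history service about resource; the localhost:8188 address is taken from the documentation examples in TimelineServer.md and is an assumption about your deployment, not a fixed value.

    // Minimal sketch of a client for the "about" endpoint documented above.
    // It issues a GET against /ws/v1/applicationhistory/about and prints the
    // JSON body; host and port are placeholders from the doc examples.
    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class TimelineAboutClientSketch {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:8188/ws/v1/applicationhistory/about");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("GET");
        conn.setRequestProperty("Accept", "application/json");
        try (BufferedReader reader = new BufferedReader(
            new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
          StringBuilder body = new StringBuilder();
          String line;
          while ((line = reader.readLine()) != null) {
            body.append(line).append('\n');
          }
          // Expect fields such as "About", "timeline-service-version" and
          // "hadoop-build-version", as in the JSON response example above.
          System.out.println(body);
        } finally {
          conn.disconnect();
        }
      }
    }

Pointing the same request at /ws/v1/timeline/ (with Accept: application/json) returns the analogous about object for the timeline API itself, as shown earlier in the documentation diff.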

[46/50] [abbrv] hadoop git commit: HADOOP-11698. Remove DistCpV1 and Logalyzer. Contributed by Brahma Reddy Battula.

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aa730ce/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestCopyFiles.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestCopyFiles.java b/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestCopyFiles.java
deleted file mode 100644
index 20b8ee2..0000000
--- a/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestCopyFiles.java
+++ /dev/null
@@ -1,1077 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.tools;
-
-import java.io.ByteArrayOutputStream;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.net.URI;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-import java.util.StringTokenizer;
-
-import junit.framework.TestCase;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FsShell;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.MiniDFSCluster.Builder;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.log4j.Level;
-
-
-/**
- * A JUnit test for copying files recursively.
- */
-@SuppressWarnings("deprecation")
-public class TestCopyFiles extends TestCase {
-  {
-    ((Log4JLogger)LogFactory.getLog("org.apache.hadoop.hdfs.StateChange")
-        ).getLogger().setLevel(Level.ERROR);
-    ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ERROR);
-    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ERROR);
-    ((Log4JLogger)DistCpV1.LOG).getLogger().setLevel(Level.ALL);
-  }
-  
-  static final URI LOCAL_FS = URI.create("file:///");
-  
-  private static final Random RAN = new Random();
-  private static final int NFILES = 20;
-  private static String TEST_ROOT_DIR =
-    new Path(System.getProperty("test.build.data","/tmp"))
-    .toString().replace(' ', '+');
-
-  /** class MyFile contains enough information to recreate the contents of
-   * a single file.
-   */
-  private static class MyFile {
-    private static Random gen = new Random();
-    private static final int MAX_LEVELS = 3;
-    private static final int MAX_SIZE = 8*1024;
-    private static String[] dirNames = {
-      "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"
-    };
-    private final String name;
-    private int size = 0;
-    private long seed = 0L;
-
-    MyFile() {
-      this(gen.nextInt(MAX_LEVELS));
-    }
-    MyFile(int nLevels) {
-      String xname = "";
-      if (nLevels != 0) {
-        int[] levels = new int[nLevels];
-        for (int idx = 0; idx < nLevels; idx++) {
-          levels[idx] = gen.nextInt(10);
-        }
-        StringBuffer sb = new StringBuffer();
-        for (int idx = 0; idx < nLevels; idx++) {
-          sb.append(dirNames[levels[idx]]);
-          sb.append("/");
-        }
-        xname = sb.toString();
-      }
-      long fidx = gen.nextLong() & Long.MAX_VALUE;
-      name = xname + Long.toString(fidx);
-      reset();
-    }
-    void reset() {
-      final int oldsize = size;
-      do { size = gen.nextInt(MAX_SIZE); } while (oldsize == size);
-      final long oldseed = seed;
-      do { seed = gen.nextLong() & Long.MAX_VALUE; } while (oldseed == seed);
-    }
-    String getName() { return name; }
-    int getSize() { return size; }
-    long getSeed() { return seed; }
-  }
-
-  private static MyFile[] createFiles(URI fsname, String topdir)
-    throws IOException {
-    return createFiles(FileSystem.get(fsname, new Configuration()), topdir);
-  }
-
-  /** create NFILES with random names and directory hierarchies
-   * with random (but reproducible) data in them.
-   */
-  private static MyFile[] createFiles(FileSystem fs, String topdir)
-    throws IOException {
-    Path root = new Path(topdir);
-    MyFile[] files = new MyFile[NFILES];
-    for (int i = 0; i < NFILES; i++) {
-      files[i] = createFile(root, fs);
-    }
-    return files;
-  }
-
-  static MyFile createFile(Path root, FileSystem fs, int levels)
-      throws IOException {
-    MyFile f = levels < 0 ? new MyFile() : new MyFile(levels);
-    Path p = new Path(root, f.getName());
-    FSDataOutputStream out = fs.create(p);
-    byte[] toWrite = new byte[f.getSize()];
-    new Random(f.getSeed()).nextBytes(toWrite);
-    out.write(toWrite);
-    out.close();
-    FileSystem.LOG.info("created: " + p + ", size=" + f.getSize());
-    return f;
-  }
-
-  static MyFile createFile(Path root, FileSystem fs) throws IOException {
-    return createFile(root, fs, -1);
-  }
-
-  private static boolean checkFiles(FileSystem fs, String topdir, MyFile[] files
-      ) throws IOException {
-    return checkFiles(fs, topdir, files, false);    
-  }
-
-  private static boolean checkFiles(FileSystem fs, String topdir, MyFile[] files,
-      boolean existingOnly) throws IOException {
-    Path root = new Path(topdir);
-    
-    for (int idx = 0; idx < files.length; idx++) {
-      Path fPath = new Path(root, files[idx].getName());
-      try {
-        fs.getFileStatus(fPath);
-        FSDataInputStream in = fs.open(fPath);
-        byte[] toRead = new byte[files[idx].getSize()];
-        byte[] toCompare = new byte[files[idx].getSize()];
-        Random rb = new Random(files[idx].getSeed());
-        rb.nextBytes(toCompare);
-        assertEquals("Cannnot read file.", toRead.length, in.read(toRead));
-        in.close();
-        for (int i = 0; i < toRead.length; i++) {
-          if (toRead[i] != toCompare[i]) {
-            return false;
-          }
-        }
-        toRead = null;
-        toCompare = null;
-      }
-      catch(FileNotFoundException fnfe) {
-        if (!existingOnly) {
-          throw fnfe;
-        }
-      }
-    }
-    
-    return true;
-  }
-
-  private static void updateFiles(FileSystem fs, String topdir, MyFile[] files,
-        int nupdate) throws IOException {
-    assert nupdate <= NFILES;
-
-    Path root = new Path(topdir);
-
-    for (int idx = 0; idx < nupdate; ++idx) {
-      Path fPath = new Path(root, files[idx].getName());
-      // overwrite file
-      assertTrue(fPath.toString() + " does not exist", fs.exists(fPath));
-      FSDataOutputStream out = fs.create(fPath);
-      files[idx].reset();
-      byte[] toWrite = new byte[files[idx].getSize()];
-      Random rb = new Random(files[idx].getSeed());
-      rb.nextBytes(toWrite);
-      out.write(toWrite);
-      out.close();
-    }
-  }
-
-  private static FileStatus[] getFileStatus(FileSystem fs,
-      String topdir, MyFile[] files) throws IOException {
-    return getFileStatus(fs, topdir, files, false);
-  }
-  private static FileStatus[] getFileStatus(FileSystem fs,
-      String topdir, MyFile[] files, boolean existingOnly) throws IOException {
-    Path root = new Path(topdir);
-    List<FileStatus> statuses = new ArrayList<FileStatus>();
-    for (int idx = 0; idx < NFILES; ++idx) {
-      try {
-        statuses.add(fs.getFileStatus(new Path(root, files[idx].getName())));
-      } catch(FileNotFoundException fnfe) {
-        if (!existingOnly) {
-          throw fnfe;
-        }
-      }
-    }
-    return statuses.toArray(new FileStatus[statuses.size()]);
-  }
-
-  private static boolean checkUpdate(FileSystem fs, FileStatus[] old,
-      String topdir, MyFile[] upd, final int nupdate) throws IOException {
-    Path root = new Path(topdir);
-
-    // overwrote updated files
-    for (int idx = 0; idx < nupdate; ++idx) {
-      final FileStatus stat =
-        fs.getFileStatus(new Path(root, upd[idx].getName()));
-      if (stat.getModificationTime() <= old[idx].getModificationTime()) {
-        return false;
-      }
-    }
-    // did not overwrite files not updated
-    for (int idx = nupdate; idx < NFILES; ++idx) {
-      final FileStatus stat =
-        fs.getFileStatus(new Path(root, upd[idx].getName()));
-      if (stat.getModificationTime() != old[idx].getModificationTime()) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  /** delete directory and everything underneath it.*/
-  private static void deldir(FileSystem fs, String topdir) throws IOException {
-    fs.delete(new Path(topdir), true);
-  }
-  
-  /** copy files from local file system to local file system */
-  @SuppressWarnings("deprecation")
-  public void testCopyFromLocalToLocal() throws Exception {
-    Configuration conf = new Configuration();
-    FileSystem localfs = FileSystem.get(LOCAL_FS, conf);
-    MyFile[] files = createFiles(LOCAL_FS, TEST_ROOT_DIR+"/srcdat");
-    ToolRunner.run(new DistCpV1(new Configuration()),
-                           new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat",
-                                         "file:///"+TEST_ROOT_DIR+"/destdat"});
-    assertTrue("Source and destination directories do not match.",
-               checkFiles(localfs, TEST_ROOT_DIR+"/destdat", files));
-    deldir(localfs, TEST_ROOT_DIR+"/destdat");
-    deldir(localfs, TEST_ROOT_DIR+"/srcdat");
-  }
-  
-  /** copy files from dfs file system to dfs file system */
-  @SuppressWarnings("deprecation")
-  public void testCopyFromDfsToDfs() throws Exception {
-    String namenode = null;
-    MiniDFSCluster cluster = null;
-    try {
-      Configuration conf = new Configuration();
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
-      final FileSystem hdfs = cluster.getFileSystem();
-      namenode = FileSystem.getDefaultUri(conf).toString();
-      if (namenode.startsWith("hdfs://")) {
-        MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
-        ToolRunner.run(new DistCpV1(conf), new String[] {
-                                         "-log",
-                                         namenode+"/logs",
-                                         namenode+"/srcdat",
-                                         namenode+"/destdat"});
-        assertTrue("Source and destination directories do not match.",
-                   checkFiles(hdfs, "/destdat", files));
-        FileSystem fs = FileSystem.get(URI.create(namenode+"/logs"), conf);
-        assertTrue("Log directory does not exist.",
-                   fs.exists(new Path(namenode+"/logs")));
-        deldir(hdfs, "/destdat");
-        deldir(hdfs, "/srcdat");
-        deldir(hdfs, "/logs");
-      }
-    } finally {
-      if (cluster != null) { cluster.shutdown(); }
-    }
-  }
-
-  /** copy empty directory on dfs file system */
-  @SuppressWarnings("deprecation")
-  public void testEmptyDir() throws Exception {
-    String namenode = null;
-    MiniDFSCluster cluster = null;
-    try {
-      Configuration conf = new Configuration();
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
-      final FileSystem hdfs = cluster.getFileSystem();
-      namenode = FileSystem.getDefaultUri(conf).toString();
-      if (namenode.startsWith("hdfs://")) {
-        
-        FileSystem fs = FileSystem.get(URI.create(namenode), new Configuration());
-        fs.mkdirs(new Path("/empty"));
-
-        ToolRunner.run(new DistCpV1(conf), new String[] {
-                                         "-log",
-                                         namenode+"/logs",
-                                         namenode+"/empty",
-                                         namenode+"/dest"});
-        fs = FileSystem.get(URI.create(namenode+"/destdat"), conf);
-        assertTrue("Destination directory does not exist.",
-                   fs.exists(new Path(namenode+"/dest")));
-        deldir(hdfs, "/dest");
-        deldir(hdfs, "/empty");
-        deldir(hdfs, "/logs");
-      }
-    } finally {
-      if (cluster != null) { cluster.shutdown(); }
-    }
-  }
-  
-  /** copy files from local file system to dfs file system */
-  @SuppressWarnings("deprecation")
-  public void testCopyFromLocalToDfs() throws Exception {
-    MiniDFSCluster cluster = null;
-    try {
-      Configuration conf = new Configuration();
-      cluster = new MiniDFSCluster.Builder(conf).build();
-      final FileSystem hdfs = cluster.getFileSystem();
-      final String namenode = hdfs.getUri().toString();
-      if (namenode.startsWith("hdfs://")) {
-        MyFile[] files = createFiles(LOCAL_FS, TEST_ROOT_DIR+"/srcdat");
-        ToolRunner.run(new DistCpV1(conf), new String[] {
-                                         "-log",
-                                         namenode+"/logs",
-                                         "file:///"+TEST_ROOT_DIR+"/srcdat",
-                                         namenode+"/destdat"});
-        assertTrue("Source and destination directories do not match.",
-                   checkFiles(cluster.getFileSystem(), "/destdat", files));
-        assertTrue("Log directory does not exist.",
-                    hdfs.exists(new Path(namenode+"/logs")));
-        deldir(hdfs, "/destdat");
-        deldir(hdfs, "/logs");
-        deldir(FileSystem.get(LOCAL_FS, conf), TEST_ROOT_DIR+"/srcdat");
-      }
-    } finally {
-      if (cluster != null) { cluster.shutdown(); }
-    }
-  }
-
-  /** copy files from dfs file system to local file system */
-  @SuppressWarnings("deprecation")
-  public void testCopyFromDfsToLocal() throws Exception {
-    MiniDFSCluster cluster = null;
-    try {
-      Configuration conf = new Configuration();
-      final FileSystem localfs = FileSystem.get(LOCAL_FS, conf);
-      cluster = new MiniDFSCluster.Builder(conf).build();
-      final FileSystem hdfs = cluster.getFileSystem();
-      final String namenode = FileSystem.getDefaultUri(conf).toString();
-      if (namenode.startsWith("hdfs://")) {
-        MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
-        ToolRunner.run(new DistCpV1(conf), new String[] {
-                                         "-log",
-                                         "/logs",
-                                         namenode+"/srcdat",
-                                         "file:///"+TEST_ROOT_DIR+"/destdat"});
-        assertTrue("Source and destination directories do not match.",
-                   checkFiles(localfs, TEST_ROOT_DIR+"/destdat", files));
-        assertTrue("Log directory does not exist.",
-                    hdfs.exists(new Path("/logs")));
-        deldir(localfs, TEST_ROOT_DIR+"/destdat");
-        deldir(hdfs, "/logs");
-        deldir(hdfs, "/srcdat");
-      }
-    } finally {
-      if (cluster != null) { cluster.shutdown(); }
-    }
-  }
-
-  @SuppressWarnings("deprecation")
-  public void testCopyDfsToDfsUpdateOverwrite() throws Exception {
-    MiniDFSCluster cluster = null;
-    try {
-      Configuration conf = new Configuration();
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
-      final FileSystem hdfs = cluster.getFileSystem();
-      final String namenode = hdfs.getUri().toString();
-      if (namenode.startsWith("hdfs://")) {
-        MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
-        ToolRunner.run(new DistCpV1(conf), new String[] {
-                                         "-p",
-                                         "-log",
-                                         namenode+"/logs",
-                                         namenode+"/srcdat",
-                                         namenode+"/destdat"});
-        assertTrue("Source and destination directories do not match.",
-                   checkFiles(hdfs, "/destdat", files));
-        FileSystem fs = FileSystem.get(URI.create(namenode+"/logs"), conf);
-        assertTrue("Log directory does not exist.",
-                    fs.exists(new Path(namenode+"/logs")));
-
-        FileStatus[] dchkpoint = getFileStatus(hdfs, "/destdat", files);
-        final int nupdate = NFILES>>2;
-        updateFiles(cluster.getFileSystem(), "/srcdat", files, nupdate);
-        deldir(hdfs, "/logs");
-
-        ToolRunner.run(new DistCpV1(conf), new String[] {
-                                         "-prbugp", // no t to avoid preserving mod. times
-                                         "-update",
-                                         "-log",
-                                         namenode+"/logs",
-                                         namenode+"/srcdat",
-                                         namenode+"/destdat"});
-        assertTrue("Source and destination directories do not match.",
-                   checkFiles(hdfs, "/destdat", files));
-        assertTrue("Update failed to replicate all changes in src",
-                 checkUpdate(hdfs, dchkpoint, "/destdat", files, nupdate));
-
-        deldir(hdfs, "/logs");
-        ToolRunner.run(new DistCpV1(conf), new String[] {
-                                         "-prbugp", // no t to avoid preserving mod. times
-                                         "-overwrite",
-                                         "-log",
-                                         namenode+"/logs",
-                                         namenode+"/srcdat",
-                                         namenode+"/destdat"});
-        assertTrue("Source and destination directories do not match.",
-                   checkFiles(hdfs, "/destdat", files));
-        assertTrue("-overwrite didn't.",
-                 checkUpdate(hdfs, dchkpoint, "/destdat", files, NFILES));
-
-        deldir(hdfs, "/destdat");
-        deldir(hdfs, "/srcdat");
-        deldir(hdfs, "/logs");
-      }
-    } finally {
-      if (cluster != null) { cluster.shutdown(); }
-    }
-  }
-
-  @SuppressWarnings("deprecation")
-  public void testCopyDfsToDfsUpdateWithSkipCRC() throws Exception {
-    MiniDFSCluster cluster = null;
-    try {
-      Configuration conf = new Configuration();
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
-      final FileSystem hdfs = cluster.getFileSystem();
-      final String namenode = hdfs.getUri().toString();
-      
-      FileSystem fs = FileSystem.get(URI.create(namenode), new Configuration());
-      // Create two files of the same name, same length but different
-      // contents
-      final String testfilename = "test";
-      final String srcData = "act act act";
-      final String destData = "cat cat cat";
-      
-      if (namenode.startsWith("hdfs://")) {
-        deldir(hdfs,"/logs");
-        
-        Path srcPath = new Path("/srcdat", testfilename);
-        Path destPath = new Path("/destdat", testfilename);
-        FSDataOutputStream out = fs.create(srcPath, true);
-        out.writeUTF(srcData);
-        out.close();
-
-        out = fs.create(destPath, true);
-        out.writeUTF(destData);
-        out.close();
-        
-        // Run with -skipcrccheck option
-        ToolRunner.run(new DistCpV1(conf), new String[] {
-          "-p",
-          "-update",
-          "-skipcrccheck",
-          "-log",
-          namenode+"/logs",
-          namenode+"/srcdat",
-          namenode+"/destdat"});
-        
-        // File should not be overwritten
-        FSDataInputStream in = hdfs.open(destPath);
-        String s = in.readUTF();
-        System.out.println("Dest had: " + s);
-        assertTrue("Dest got over written even with skip crc",
-            s.equalsIgnoreCase(destData));
-        in.close();
-        
-        deldir(hdfs, "/logs");
-
-        // Run without the option        
-        ToolRunner.run(new DistCpV1(conf), new String[] {
-          "-p",
-          "-update",
-          "-log",
-          namenode+"/logs",
-          namenode+"/srcdat",
-          namenode+"/destdat"});
-        
-        // File should be overwritten
-        in = hdfs.open(destPath);
-        s = in.readUTF();
-        System.out.println("Dest had: " + s);
-
-        assertTrue("Dest did not get overwritten without skip crc",
-            s.equalsIgnoreCase(srcData));
-        in.close();
-
-        deldir(hdfs, "/destdat");
-        deldir(hdfs, "/srcdat");
-        deldir(hdfs, "/logs");
-       }
-    } finally {
-      if (cluster != null) { cluster.shutdown(); }
-    }
-  }
-
-  @SuppressWarnings("deprecation")
-  public void testCopyDuplication() throws Exception {
-    final FileSystem localfs = FileSystem.get(LOCAL_FS, new Configuration());
-    try {    
-      MyFile[] files = createFiles(localfs, TEST_ROOT_DIR+"/srcdat");
-      ToolRunner.run(new DistCpV1(new Configuration()),
-          new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat",
-                        "file:///"+TEST_ROOT_DIR+"/src2/srcdat"});
-      assertTrue("Source and destination directories do not match.",
-                 checkFiles(localfs, TEST_ROOT_DIR+"/src2/srcdat", files));
-  
-      assertEquals(DistCpV1.DuplicationException.ERROR_CODE,
-          ToolRunner.run(new DistCpV1(new Configuration()),
-          new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat",
-                        "file:///"+TEST_ROOT_DIR+"/src2/srcdat",
-                        "file:///"+TEST_ROOT_DIR+"/destdat",}));
-    }
-    finally {
-      deldir(localfs, TEST_ROOT_DIR+"/destdat");
-      deldir(localfs, TEST_ROOT_DIR+"/srcdat");
-      deldir(localfs, TEST_ROOT_DIR+"/src2");
-    }
-  }
-
-  @SuppressWarnings("deprecation")
-  public void testCopySingleFile() throws Exception {
-    FileSystem fs = FileSystem.get(LOCAL_FS, new Configuration());
-    Path root = new Path(TEST_ROOT_DIR+"/srcdat");
-    try {    
-      MyFile[] files = {createFile(root, fs)};
-      //copy a dir with a single file
-      ToolRunner.run(new DistCpV1(new Configuration()),
-          new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat",
-                        "file:///"+TEST_ROOT_DIR+"/destdat"});
-      assertTrue("Source and destination directories do not match.",
-                 checkFiles(fs, TEST_ROOT_DIR+"/destdat", files));
-      
-      //copy a single file
-      String fname = files[0].getName();
-      Path p = new Path(root, fname);
-      FileSystem.LOG.info("fname=" + fname + ", exists? " + fs.exists(p));
-      ToolRunner.run(new DistCpV1(new Configuration()),
-          new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat/"+fname,
-                        "file:///"+TEST_ROOT_DIR+"/dest2/"+fname});
-      assertTrue("Source and destination directories do not match.",
-          checkFiles(fs, TEST_ROOT_DIR+"/dest2", files));     
-      
-      // single file update should skip copy if destination has the file already
-      String[] args = {"-update", "file:///"+TEST_ROOT_DIR+"/srcdat/"+fname,
-          "file:///"+TEST_ROOT_DIR+"/dest2/"+fname};
-      Configuration conf = new Configuration();
-      JobConf job = new JobConf(conf, DistCpV1.class);
-      DistCpV1.Arguments distcpArgs = DistCpV1.Arguments.valueOf(args, conf);
-      assertFalse("Single file update failed to skip copying even though the " 
-          + "file exists at destination.", DistCpV1.setup(conf, job, distcpArgs));
-      
-      //copy single file to existing dir
-      deldir(fs, TEST_ROOT_DIR+"/dest2");
-      fs.mkdirs(new Path(TEST_ROOT_DIR+"/dest2"));
-      MyFile[] files2 = {createFile(root, fs, 0)};
-      String sname = files2[0].getName();
-      ToolRunner.run(new DistCpV1(new Configuration()),
-          new String[] {"-update",
-                        "file:///"+TEST_ROOT_DIR+"/srcdat/"+sname,
-                        "file:///"+TEST_ROOT_DIR+"/dest2/"});
-      assertTrue("Source and destination directories do not match.",
-          checkFiles(fs, TEST_ROOT_DIR+"/dest2", files2));     
-      updateFiles(fs, TEST_ROOT_DIR+"/srcdat", files2, 1);
-      //copy single file to existing dir w/ dst name conflict
-      ToolRunner.run(new DistCpV1(new Configuration()),
-          new String[] {"-update",
-                        "file:///"+TEST_ROOT_DIR+"/srcdat/"+sname,
-                        "file:///"+TEST_ROOT_DIR+"/dest2/"});
-      assertTrue("Source and destination directories do not match.",
-          checkFiles(fs, TEST_ROOT_DIR+"/dest2", files2));     
-    }
-    finally {
-      deldir(fs, TEST_ROOT_DIR+"/destdat");
-      deldir(fs, TEST_ROOT_DIR+"/dest2");
-      deldir(fs, TEST_ROOT_DIR+"/srcdat");
-    }
-  }
-
-  /** tests basedir option copying files from dfs file system to dfs file system */
-  @SuppressWarnings("deprecation")
-  public void testBasedir() throws Exception {
-    String namenode = null;
-    MiniDFSCluster cluster = null;
-    try {
-      Configuration conf = new Configuration();
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
-      final FileSystem hdfs = cluster.getFileSystem();
-      namenode = FileSystem.getDefaultUri(conf).toString();
-      if (namenode.startsWith("hdfs://")) {
-        MyFile[] files = createFiles(URI.create(namenode), "/basedir/middle/srcdat");
-        ToolRunner.run(new DistCpV1(conf), new String[] {
-                                         "-basedir",
-                                         "/basedir",
-                                         namenode+"/basedir/middle/srcdat",
-                                         namenode+"/destdat"});
-        assertTrue("Source and destination directories do not match.",
-                   checkFiles(hdfs, "/destdat/middle/srcdat", files));
-        deldir(hdfs, "/destdat");
-        deldir(hdfs, "/basedir");
-        deldir(hdfs, "/logs");
-      }
-    } finally {
-      if (cluster != null) { cluster.shutdown(); }
-    }
-  }
-
-  @SuppressWarnings("deprecation")
-  public void testPreserveOption() throws Exception {
-    Configuration conf = new Configuration();
-    MiniDFSCluster cluster = null;
-    try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
-      String nnUri = FileSystem.getDefaultUri(conf).toString();
-      FileSystem fs = FileSystem.get(URI.create(nnUri), conf);
-
-      {//test preserving user
-        MyFile[] files = createFiles(URI.create(nnUri), "/srcdat");
-        FileStatus[] srcstat = getFileStatus(fs, "/srcdat", files);
-        for(int i = 0; i < srcstat.length; i++) {
-          fs.setOwner(srcstat[i].getPath(), "u" + i, null);
-        }
-        ToolRunner.run(new DistCpV1(conf),
-            new String[]{"-pu", nnUri+"/srcdat", nnUri+"/destdat"});
-        assertTrue("Source and destination directories do not match.",
-                   checkFiles(fs, "/destdat", files));
-        
-        FileStatus[] dststat = getFileStatus(fs, "/destdat", files);
-        for(int i = 0; i < dststat.length; i++) {
-          assertEquals("i=" + i, "u" + i, dststat[i].getOwner());
-        }
-        deldir(fs, "/destdat");
-        deldir(fs, "/srcdat");
-      }
-
-      {//test preserving group
-        MyFile[] files = createFiles(URI.create(nnUri), "/srcdat");
-        FileStatus[] srcstat = getFileStatus(fs, "/srcdat", files);
-        for(int i = 0; i < srcstat.length; i++) {
-          fs.setOwner(srcstat[i].getPath(), null, "g" + i);
-        }
-        ToolRunner.run(new DistCpV1(conf),
-            new String[]{"-pg", nnUri+"/srcdat", nnUri+"/destdat"});
-        assertTrue("Source and destination directories do not match.",
-                   checkFiles(fs, "/destdat", files));
-        
-        FileStatus[] dststat = getFileStatus(fs, "/destdat", files);
-        for(int i = 0; i < dststat.length; i++) {
-          assertEquals("i=" + i, "g" + i, dststat[i].getGroup());
-        }
-        deldir(fs, "/destdat");
-        deldir(fs, "/srcdat");
-      }
-
-      {//test preserving mode
-        MyFile[] files = createFiles(URI.create(nnUri), "/srcdat");
-        FileStatus[] srcstat = getFileStatus(fs, "/srcdat", files);
-        FsPermission[] permissions = new FsPermission[srcstat.length];
-        for(int i = 0; i < srcstat.length; i++) {
-          permissions[i] = new FsPermission((short)(i & 0666));
-          fs.setPermission(srcstat[i].getPath(), permissions[i]);
-        }
-
-        ToolRunner.run(new DistCpV1(conf),
-            new String[]{"-pp", nnUri+"/srcdat", nnUri+"/destdat"});
-        assertTrue("Source and destination directories do not match.",
-                   checkFiles(fs, "/destdat", files));
-  
-        FileStatus[] dststat = getFileStatus(fs, "/destdat", files);
-        for(int i = 0; i < dststat.length; i++) {
-          assertEquals("i=" + i, permissions[i], dststat[i].getPermission());
-        }
-        deldir(fs, "/destdat");
-        deldir(fs, "/srcdat");
-      }
-
-      {//test preserving times
-        MyFile[] files = createFiles(URI.create(nnUri), "/srcdat");
-        fs.mkdirs(new Path("/srcdat/tmpf1"));
-        fs.mkdirs(new Path("/srcdat/tmpf2"));
-        FileStatus[] srcstat = getFileStatus(fs, "/srcdat", files);
-        FsPermission[] permissions = new FsPermission[srcstat.length];
-        for(int i = 0; i < srcstat.length; i++) {
-          fs.setTimes(srcstat[i].getPath(), 40, 50);
-        }
-
-        ToolRunner.run(new DistCpV1(conf),
-            new String[]{"-pt", nnUri+"/srcdat", nnUri+"/destdat"});
-
-        FileStatus[] dststat = getFileStatus(fs, "/destdat", files);
-        for(int i = 0; i < dststat.length; i++) {
-          assertEquals("Modif. Time i=" + i, 40, dststat[i].getModificationTime());
-          assertEquals("Access Time i=" + i+ srcstat[i].getPath() + "-" + dststat[i].getPath(), 50, dststat[i].getAccessTime());
-        }
-        
-        assertTrue("Source and destination directories do not match.",
-                   checkFiles(fs, "/destdat", files));
-  
-        deldir(fs, "/destdat");
-        deldir(fs, "/srcdat");
-      }
-    } finally {
-      if (cluster != null) { cluster.shutdown(); }
-    }
-  }
-
-  @SuppressWarnings("deprecation")
-  public void testMapCount() throws Exception {
-    String namenode = null;
-    MiniDFSCluster dfs = null;
-    MiniDFSCluster mr = null;
-    try {
-      Configuration conf = new Configuration();
-      
-      dfs= new MiniDFSCluster.Builder(conf).numDataNodes(3).format(true).build();
-      
-      FileSystem fs = dfs.getFileSystem();
-      final FsShell shell = new FsShell(conf);
-      namenode = fs.getUri().toString();
-      MyFile[] files = createFiles(fs.getUri(), "/srcdat");
-      long totsize = 0;
-      for (MyFile f : files) {
-        totsize += f.getSize();
-      }
-      
-      Configuration job = new JobConf(conf);
-      job.setLong("distcp.bytes.per.map", totsize / 3);
-      ToolRunner.run(new DistCpV1(job),
-          new String[] {"-m", "100",
-                        "-log",
-                        namenode+"/logs",
-                        namenode+"/srcdat",
-                        namenode+"/destdat"});
-      assertTrue("Source and destination directories do not match.",
-                 checkFiles(fs, "/destdat", files));
-
-      String logdir = namenode + "/logs";
-      System.out.println(execCmd(shell, "-lsr", logdir));
-      FileStatus[] logs = fs.listStatus(new Path(logdir));
-      // rare case where splits are exact, logs.length can be 4
-      assertTrue( logs.length == 2);
-
-      deldir(fs, "/destdat");
-      deldir(fs, "/logs");
-      ToolRunner.run(new DistCpV1(job),
-          new String[] {"-m", "1",
-                        "-log",
-                        namenode+"/logs",
-                        namenode+"/srcdat",
-                        namenode+"/destdat"});
-
-      System.out.println(execCmd(shell, "-lsr", logdir));
-      logs = fs.globStatus(new Path(namenode+"/logs/part*"));
-      assertTrue("Unexpected map count, logs.length=" + logs.length,
-          logs.length == 1);
-    } finally {
-      if (dfs != null) { dfs.shutdown(); }
-      if (mr != null) { mr.shutdown(); }
-    }
-  }
-
-  @SuppressWarnings("deprecation")
-  public void testLimits() throws Exception {
-    Configuration conf = new Configuration();
-    MiniDFSCluster cluster = null;
-    try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
-      final String nnUri = FileSystem.getDefaultUri(conf).toString();
-      final FileSystem fs = FileSystem.get(URI.create(nnUri), conf);
-      final DistCpV1 distcp = new DistCpV1(conf);
-      final FsShell shell = new FsShell(conf);  
-
-      final String srcrootdir =  "/src_root";
-      final Path srcrootpath = new Path(srcrootdir); 
-      final String dstrootdir =  "/dst_root";
-      final Path dstrootpath = new Path(dstrootdir); 
-
-      {//test -filelimit
-        MyFile[] files = createFiles(URI.create(nnUri), srcrootdir);
-        int filelimit = files.length / 2;
-        System.out.println("filelimit=" + filelimit);
-
-        ToolRunner.run(distcp,
-            new String[]{"-filelimit", ""+filelimit, nnUri+srcrootdir, nnUri+dstrootdir});
-        String results = execCmd(shell, "-lsr", dstrootdir);
-        results = removePrefix(results, dstrootdir);
-        System.out.println("results=" +  results);
-
-        FileStatus[] dststat = getFileStatus(fs, dstrootdir, files, true);
-        assertEquals(filelimit, dststat.length);
-        deldir(fs, dstrootdir);
-        deldir(fs, srcrootdir);
-      }
-
-      {//test -sizelimit
-        createFiles(URI.create(nnUri), srcrootdir);
-        long sizelimit = fs.getContentSummary(srcrootpath).getLength()/2;
-        System.out.println("sizelimit=" + sizelimit);
-
-        ToolRunner.run(distcp,
-            new String[]{"-sizelimit", ""+sizelimit, nnUri+srcrootdir, nnUri+dstrootdir});
-        
-        ContentSummary summary = fs.getContentSummary(dstrootpath);
-        System.out.println("summary=" + summary);
-        assertTrue(summary.getLength() <= sizelimit);
-        deldir(fs, dstrootdir);
-        deldir(fs, srcrootdir);
-      }
-
-      {//test update
-        final MyFile[] srcs = createFiles(URI.create(nnUri), srcrootdir);
-        final long totalsize = fs.getContentSummary(srcrootpath).getLength();
-        System.out.println("src.length=" + srcs.length);
-        System.out.println("totalsize =" + totalsize);
-        fs.mkdirs(dstrootpath);
-        final int parts = RAN.nextInt(NFILES/3 - 1) + 2;
-        final int filelimit = srcs.length/parts;
-        final long sizelimit = totalsize/parts;
-        System.out.println("filelimit=" + filelimit);
-        System.out.println("sizelimit=" + sizelimit);
-        System.out.println("parts    =" + parts);
-        final String[] args = {"-filelimit", ""+filelimit, "-sizelimit", ""+sizelimit,
-            "-update", nnUri+srcrootdir, nnUri+dstrootdir};
-
-        int dstfilecount = 0;
-        long dstsize = 0;
-        for(int i = 0; i <= parts; i++) {
-          ToolRunner.run(distcp, args);
-        
-          FileStatus[] dststat = getFileStatus(fs, dstrootdir, srcs, true);
-          System.out.println(i + ") dststat.length=" + dststat.length);
-          assertTrue(dststat.length - dstfilecount <= filelimit);
-          ContentSummary summary = fs.getContentSummary(dstrootpath);
-          System.out.println(i + ") summary.getLength()=" + summary.getLength());
-          assertTrue(summary.getLength() - dstsize <= sizelimit);
-          assertTrue(checkFiles(fs, dstrootdir, srcs, true));
-          dstfilecount = dststat.length;
-          dstsize = summary.getLength();
-        }
-
-        deldir(fs, dstrootdir);
-        deldir(fs, srcrootdir);
-      }
-    } finally {
-      if (cluster != null) { cluster.shutdown(); }
-    }
-  }
-
-  static final long now = System.currentTimeMillis();
-
-  static UserGroupInformation createUGI(String name, boolean issuper) {
-    String username = name + now;
-    String group = issuper? "supergroup": username;
-    return UserGroupInformation.createUserForTesting(username, 
-        new String[]{group});
-  }
-
-  static Path createHomeDirectory(FileSystem fs, UserGroupInformation ugi
-      ) throws IOException {
-    final Path home = new Path("/user/" + ugi.getUserName());
-    fs.mkdirs(home);
-    fs.setOwner(home, ugi.getUserName(), ugi.getGroupNames()[0]);
-    fs.setPermission(home, new FsPermission((short)0700));
-    return home;
-  }
-
-  /** test -delete */
-  @SuppressWarnings("deprecation")
-  public void testDelete() throws Exception {
-    final Configuration conf = new Configuration();
-    conf.setInt("fs.trash.interval", 60);
-    MiniDFSCluster cluster = null;
-    try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
-      final URI nnURI = FileSystem.getDefaultUri(conf);
-      final String nnUri = nnURI.toString();
-      final FileSystem fs = FileSystem.get(URI.create(nnUri), conf);
-
-      final DistCpV1 distcp = new DistCpV1(conf);
-      final FsShell shell = new FsShell(conf);  
-
-      final String srcrootdir = "/src_root";
-      final String dstrootdir = "/dst_root";
-
-      {
-        //create source files
-        createFiles(nnURI, srcrootdir);
-        String srcresults = execCmd(shell, "-lsr", srcrootdir);
-        srcresults = removePrefix(srcresults, srcrootdir);
-        System.out.println("srcresults=" +  srcresults);
-
-        //create some files in dst
-        createFiles(nnURI, dstrootdir);
-        System.out.println("dstrootdir=" +  dstrootdir);
-        shell.run(new String[]{"-lsr", dstrootdir});
-
-        //run distcp
-        ToolRunner.run(distcp,
-            new String[]{"-delete", "-update", "-log", "/log",
-                         nnUri+srcrootdir, nnUri+dstrootdir});
-
-        //make sure src and dst contains the same files
-        String dstresults = execCmd(shell, "-lsr", dstrootdir);
-        dstresults = removePrefix(dstresults, dstrootdir);
-        System.out.println("first dstresults=" +  dstresults);
-        assertEquals(srcresults, dstresults);
-
-        //create additional file in dst
-        create(fs, new Path(dstrootdir, "foo"));
-        create(fs, new Path(dstrootdir, "foobar"));
-
-        //run distcp again
-        ToolRunner.run(distcp,
-            new String[]{"-delete", "-update", "-log", "/log2",
-                         nnUri+srcrootdir, nnUri+dstrootdir});
-        
-        //make sure src and dst contains the same files
-        dstresults = execCmd(shell, "-lsr", dstrootdir);
-        dstresults = removePrefix(dstresults, dstrootdir);
-        System.out.println("second dstresults=" +  dstresults);
-        assertEquals(srcresults, dstresults);
-        // verify that files removed in -delete were moved to the trash
-        // regrettably, this test will break if Trash changes incompatibly
-        assertTrue(fs.exists(new Path(fs.getHomeDirectory(),
-                ".Trash/Current" + dstrootdir + "/foo")));
-        assertTrue(fs.exists(new Path(fs.getHomeDirectory(),
-                ".Trash/Current" + dstrootdir + "/foobar")));
-
-        //cleanup
-        deldir(fs, dstrootdir);
-        deldir(fs, srcrootdir);
-      }
-    } finally {
-      if (cluster != null) { cluster.shutdown(); }
-    }
-  }
-
-  /**
-   * verify that -delete option works for other {@link FileSystem}
-   * implementations. See MAPREDUCE-1285 */
-  @SuppressWarnings("deprecation")
-  public void testDeleteLocal() throws Exception {
-    MiniDFSCluster cluster = null;
-    try {
-      Configuration conf = new Configuration();
-      final FileSystem localfs = FileSystem.get(LOCAL_FS, conf);
-      cluster = new MiniDFSCluster.Builder(conf).build();
-      final FileSystem hdfs = cluster.getFileSystem();
-      final String namenode = FileSystem.getDefaultUri(conf).toString();
-      if (namenode.startsWith("hdfs://")) {
-        MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
-        String destdir = TEST_ROOT_DIR + "/destdat";
-        MyFile[] localFiles = createFiles(localfs, destdir);
-        ToolRunner.run(new DistCpV1(conf), new String[] {
-                                         "-delete",
-                                         "-update",
-                                         "-log",
-                                         "/logs",
-                                         namenode+"/srcdat",
-                                         "file:///"+TEST_ROOT_DIR+"/destdat"});
-        assertTrue("Source and destination directories do not match.",
-                   checkFiles(localfs, destdir, files));
-        assertTrue("Log directory does not exist.",
-                    hdfs.exists(new Path("/logs")));
-        deldir(localfs, destdir);
-        deldir(hdfs, "/logs");
-        deldir(hdfs, "/srcdat");
-      }
-    } finally {
-      if (cluster != null) { cluster.shutdown(); }
-    }
-  }
-
-  /** test globbing  */
-  @SuppressWarnings("deprecation")
-  public void testGlobbing() throws Exception {
-    String namenode = null;
-    MiniDFSCluster cluster = null;
-    try {
-      Configuration conf = new Configuration();
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
-      final FileSystem hdfs = cluster.getFileSystem();
-      namenode = FileSystem.getDefaultUri(conf).toString();
-      if (namenode.startsWith("hdfs://")) {
-        MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
-        ToolRunner.run(new DistCpV1(conf), new String[] {
-                                         "-log",
-                                         namenode+"/logs",
-                                         namenode+"/srcdat/*",
-                                         namenode+"/destdat"});
-        assertTrue("Source and destination directories do not match.",
-                   checkFiles(hdfs, "/destdat", files));
-        FileSystem fs = FileSystem.get(URI.create(namenode+"/logs"), conf);
-        assertTrue("Log directory does not exist.",
-                   fs.exists(new Path(namenode+"/logs")));
-        deldir(hdfs, "/destdat");
-        deldir(hdfs, "/srcdat");
-        deldir(hdfs, "/logs");
-      }
-    } finally {
-      if (cluster != null) { cluster.shutdown(); }
-    }
-  }
-  
-  static void create(FileSystem fs, Path f) throws IOException {
-    FSDataOutputStream out = fs.create(f);
-    try {
-      byte[] b = new byte[1024 + RAN.nextInt(1024)];
-      RAN.nextBytes(b);
-      out.write(b);
-    } finally {
-      if (out != null) out.close();
-    }
-  }
-  
-  static String execCmd(FsShell shell, String... args) throws Exception {
-    ByteArrayOutputStream baout = new ByteArrayOutputStream();
-    PrintStream out = new PrintStream(baout, true);
-    PrintStream old = System.out;
-    System.setOut(out);
-    shell.run(args);
-    out.close();
-    System.setOut(old);
-    return baout.toString();
-  }
-  
-  private static String removePrefix(String lines, String prefix) {
-    final int prefixlen = prefix.length();
-    final StringTokenizer t = new StringTokenizer(lines, "\n");
-    final StringBuffer results = new StringBuffer(); 
-    for(; t.hasMoreTokens(); ) {
-      String s = t.nextToken();
-      results.append(s.substring(s.indexOf(prefix) + prefixlen) + "\n");
-    }
-    return results.toString();
-  }
-}
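
For reference, the deleted TestCopyFiles suite above drove DistCpV1 almost entirely through ToolRunner, which is also how the tool is invoked from the command line. A minimal, self-contained sketch of that pattern is shown here; the file:///tmp paths and the DistCpV1Example class name are illustrative only, and DistCpV1 itself is deprecated (see HADOOP-11939 below):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.tools.DistCpV1;
  import org.apache.hadoop.util.ToolRunner;

  public class DistCpV1Example {
    // DistCpV1 is deprecated in favour of the newer org.apache.hadoop.tools.DistCp.
    @SuppressWarnings("deprecation")
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // Recursively copy /tmp/srcdat to /tmp/destdat on the local file system,
      // the same shape of invocation as testCopyFromLocalToLocal used.
      int rc = ToolRunner.run(new DistCpV1(conf), new String[] {
          "file:///tmp/srcdat",
          "file:///tmp/destdat"});
      System.exit(rc);
    }
  }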

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aa730ce/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestLogalyzer.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestLogalyzer.java b/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestLogalyzer.java
deleted file mode 100644
index 3787f6c..0000000
--- a/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestLogalyzer.java
+++ /dev/null
@@ -1,133 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.tools;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.util.EnumSet;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.CreateFlag;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileContext;
-import org.apache.hadoop.fs.Path;
-import org.junit.Assert;
-import org.junit.Test;
-
-public class TestLogalyzer {
-  private static String EL = System.getProperty("line.separator");
-  private static String TAB = "\t";
-  private static final Log LOG = LogFactory.getLog(TestLogalyzer.class);
-
-  private static File workSpace = new File("target",
-      TestLogalyzer.class.getName() + "-workSpace");
-  private static File outdir = new File(workSpace.getAbsoluteFile()
-      + File.separator + "out");
-
-  @Test
-  @SuppressWarnings("deprecation")
-  public void testLogalyzer() throws Exception {
-    Path f = createLogFile();
-
-    String[] args = new String[10];
-
-    args[0] = "-archiveDir";
-    args[1] = f.toString();
-    args[2] = "-grep";
-    args[3] = "44";
-    args[4] = "-sort";
-    args[5] = "0";
-    args[6] = "-analysis";
-    args[7] = outdir.getAbsolutePath();
-    args[8] = "-separator";
-    args[9] = " ";
-
-    Logalyzer.main(args);
-    checkResult();
-
-  }
-
-  private void checkResult() throws Exception {
-    File result = new File(outdir.getAbsolutePath() + File.separator
-        + "part-00000");
-    File success = new File(outdir.getAbsolutePath() + File.separator
-        + "_SUCCESS");
-    Assert.assertTrue(success.exists());
-
-    FileInputStream fis = new FileInputStream(result);
-    BufferedReader br = new BufferedReader(new InputStreamReader(fis, "UTF-8"));
-    String line = br.readLine();
-    Assert.assertTrue(("1 44" + TAB + "2").equals(line));
-    line = br.readLine();
-
-    Assert.assertTrue(("3 44" + TAB + "1").equals(line));
-    line = br.readLine();
-
-    Assert.assertTrue(("4 44" + TAB + "1").equals(line));
-
-    br.close();
-
-  }
-
-  /**
-   * Create simple log file
-   * 
-   * @return
-   * @throws IOException
-   */
-
-  private Path createLogFile() throws IOException {
-
-    FileContext files = FileContext.getLocalFSFileContext();
-
-    Path ws = new Path(workSpace.getAbsoluteFile().getAbsolutePath());
-
-    files.delete(ws, true);
-    Path workSpacePath = new Path(workSpace.getAbsolutePath(), "log");
-    files.mkdir(workSpacePath, null, true);
-
-    LOG.info("create logfile.log");
-    Path logfile1 = new Path(workSpacePath, "logfile.log");
-
-    FSDataOutputStream os = files.create(logfile1,
-        EnumSet.of(CreateFlag.CREATE));
-    os.writeBytes("4 3" + EL + "1 3" + EL + "4 44" + EL);
-    os.writeBytes("2 3" + EL + "1 3" + EL + "0 45" + EL);
-    os.writeBytes("4 3" + EL + "1 3" + EL + "1 44" + EL);
-
-    os.flush();
-    os.close();
-    LOG.info("create logfile1.log");
-
-    Path logfile2 = new Path(workSpacePath, "logfile1.log");
-
-    os = files.create(logfile2, EnumSet.of(CreateFlag.CREATE));
-    os.writeBytes("4 3" + EL + "1 3" + EL + "3 44" + EL);
-    os.writeBytes("2 3" + EL + "1 3" + EL + "0 45" + EL);
-    os.writeBytes("4 3" + EL + "1 3" + EL + "1 44" + EL);
-
-    os.flush();
-    os.close();
-
-    return workSpacePath;
-  }
-}
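
Similarly, the deleted TestLogalyzer test drove Logalyzer through its main() with the -archiveDir, -grep, -sort, -analysis and -separator options. A stripped-down sketch of that command-line pattern follows; the /tmp paths are placeholders, not values from the test, and Logalyzer is likewise deprecated:

  import org.apache.hadoop.tools.Logalyzer;

  public class LogalyzerExample {
    // Logalyzer is deprecated as of HADOOP-11939.
    @SuppressWarnings("deprecation")
    public static void main(String[] unused) throws Exception {
      // Point Logalyzer at a directory of log files, grep them for "44",
      // sort on column 0, and write the analysis to a local output directory.
      String[] args = {
          "-archiveDir", "/tmp/logalyzer/logs",
          "-grep", "44",
          "-sort", "0",
          "-analysis", "/tmp/logalyzer/out",
          "-separator", " "
      };
      Logalyzer.main(args);
    }
  }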


[19/50] [abbrv] hadoop git commit: HADOOP-11884. test-patch.sh should pull the real findbugs version (Kengo Seki via aw)

Posted by ji...@apache.org.
HADOOP-11884. test-patch.sh should pull the real findbugs version  (Kengo Seki via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/182d86da
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/182d86da
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/182d86da

Branch: refs/heads/HDFS-7240
Commit: 182d86dac04a2168b1888af34f0a7042379d7e53
Parents: a6af024
Author: Allen Wittenauer <aw...@apache.org>
Authored: Mon May 18 16:08:49 2015 +0000
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Mon May 18 16:08:49 2015 +0000

----------------------------------------------------------------------
 dev-support/test-patch.sh                       | 5 +++--
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
 2 files changed, 6 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/182d86da/dev-support/test-patch.sh
----------------------------------------------------------------------
diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index 9cc5bb0..00a638c 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -1859,8 +1859,6 @@ function check_findbugs
     return 1
   fi
 
-  findbugs_version=$("${FINDBUGS_HOME}/bin/findbugs" -version)
-
   for module in ${modules}
   do
     pushd "${module}" >/dev/null
@@ -1872,6 +1870,9 @@ function check_findbugs
     popd >/dev/null
   done
 
+  #shellcheck disable=SC2016
+  findbugs_version=$(${AWK} 'match($0, /findbugs-maven-plugin:[^:]*:findbugs/) { print substr($0, RSTART + 22, RLENGTH - 31); exit }' "${PATCH_DIR}/patchFindBugsOutput${module_suffix}.txt")
+
   if [[ ${rc} -ne 0 ]]; then
     add_jira_table -1 findbugs "The patch appears to cause Findbugs (version ${findbugs_version}) to fail."
     return 1

http://git-wip-us.apache.org/repos/asf/hadoop/blob/182d86da/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 2138334..1c2cdaa 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -575,6 +575,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-11939. Deprecate DistCpV1 and Logalyzer.
     (Brahma Reddy Battula via aajisaka)
 
+    HADOOP-11884. test-patch.sh should pull the real findbugs version
+    (Kengo Seki via aw)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp
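
The substance of this change is the awk one-liner: instead of asking the locally installed findbugs binary for its version, test-patch.sh now scrapes the version that the build actually used out of the findbugs-maven-plugin line in the captured Maven output, keeping the middle field of findbugs-maven-plugin:<version>:findbugs. The same extraction expressed in Java, purely as an illustration (the sample Maven line and class name are made up):

  import java.util.regex.Matcher;
  import java.util.regex.Pattern;

  public class FindbugsVersionFromMavenOutput {
    public static void main(String[] args) {
      // Mirrors the awk match(): keep whatever sits between
      // "findbugs-maven-plugin:" and ":findbugs".
      String line =
          "[INFO] --- findbugs-maven-plugin:3.0.0:findbugs (default-cli) @ hadoop-common ---";
      Matcher m = Pattern.compile("findbugs-maven-plugin:([^:]*):findbugs").matcher(line);
      if (m.find()) {
        System.out.println(m.group(1));   // prints 3.0.0
      }
    }
  }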


[15/50] [abbrv] hadoop git commit: HADOOP-11939. Deprecate DistCpV1 and Logalyzer. Contributed by Brahma Reddy Battula.

Posted by ji...@apache.org.
HADOOP-11939. Deprecate DistCpV1 and Logalyzer. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f5c48238
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f5c48238
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f5c48238

Branch: refs/heads/HDFS-7240
Commit: f5c48238d5eb0c1d2b876f390ac6c35221efcb54
Parents: cab0dad
Author: Akira Ajisaka <aa...@apache.org>
Authored: Mon May 18 16:13:16 2015 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Mon May 18 16:13:16 2015 +0900

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../java/org/apache/hadoop/tools/DistCpV1.java  |  1 +
 .../java/org/apache/hadoop/tools/Logalyzer.java |  3 ++-
 .../org/apache/hadoop/tools/TestCopyFiles.java  | 22 +++++++++++++++++---
 .../org/apache/hadoop/tools/TestLogalyzer.java  |  1 +
 5 files changed, 26 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5c48238/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 5666035..7349091 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -572,6 +572,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-11960. Enable Azure-Storage Client Side logging.
     (Dushyanth via cnauroth)
 
+    HADOOP-11939. Deprecate DistCpV1 and Logalyzer.
+    (Brahma Reddy Battula via aajisaka)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5c48238/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCpV1.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCpV1.java b/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCpV1.java
index 8a6819b..39ac5c3 100644
--- a/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCpV1.java
+++ b/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistCpV1.java
@@ -81,6 +81,7 @@ import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
  * A Map-reduce program to recursively copy directories between
  * different file-systems.
  */
+@Deprecated
 public class DistCpV1 implements Tool {
   public static final Log LOG = LogFactory.getLog(DistCpV1.class);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5c48238/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/Logalyzer.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/Logalyzer.java b/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/Logalyzer.java
index 449ecbf..05e6e24 100644
--- a/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/Logalyzer.java
+++ b/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/Logalyzer.java
@@ -65,7 +65,7 @@ import org.apache.hadoop.mapreduce.lib.map.RegexMapper;
  * &lt;col1, col2&gt; -separator &lt;separator&gt;
  * <p>
  */
-
+@Deprecated
 public class Logalyzer {
   // Constants
   private static Configuration fsConfig = new Configuration();
@@ -194,6 +194,7 @@ public class Logalyzer {
    * @param archiveDirectory : The directory to store archived logfiles.
    * @throws IOException
    */
+  @SuppressWarnings("deprecation")
   public void	
     doArchive(String logListURI, String archiveDirectory)
     throws IOException

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5c48238/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestCopyFiles.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestCopyFiles.java b/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestCopyFiles.java
index 73bba66..20b8ee2 100644
--- a/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestCopyFiles.java
+++ b/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestCopyFiles.java
@@ -55,7 +55,7 @@ import org.apache.log4j.Level;
 /**
  * A JUnit test for copying files recursively.
  */
-
+@SuppressWarnings("deprecation")
 public class TestCopyFiles extends TestCase {
   {
     ((Log4JLogger)LogFactory.getLog("org.apache.hadoop.hdfs.StateChange")
@@ -261,6 +261,7 @@ public class TestCopyFiles extends TestCase {
   }
   
   /** copy files from local file system to local file system */
+  @SuppressWarnings("deprecation")
   public void testCopyFromLocalToLocal() throws Exception {
     Configuration conf = new Configuration();
     FileSystem localfs = FileSystem.get(LOCAL_FS, conf);
@@ -275,6 +276,7 @@ public class TestCopyFiles extends TestCase {
   }
   
   /** copy files from dfs file system to dfs file system */
+  @SuppressWarnings("deprecation")
   public void testCopyFromDfsToDfs() throws Exception {
     String namenode = null;
     MiniDFSCluster cluster = null;
@@ -305,6 +307,7 @@ public class TestCopyFiles extends TestCase {
   }
 
   /** copy empty directory on dfs file system */
+  @SuppressWarnings("deprecation")
   public void testEmptyDir() throws Exception {
     String namenode = null;
     MiniDFSCluster cluster = null;
@@ -336,6 +339,7 @@ public class TestCopyFiles extends TestCase {
   }
   
   /** copy files from local file system to dfs file system */
+  @SuppressWarnings("deprecation")
   public void testCopyFromLocalToDfs() throws Exception {
     MiniDFSCluster cluster = null;
     try {
@@ -364,6 +368,7 @@ public class TestCopyFiles extends TestCase {
   }
 
   /** copy files from dfs file system to local file system */
+  @SuppressWarnings("deprecation")
   public void testCopyFromDfsToLocal() throws Exception {
     MiniDFSCluster cluster = null;
     try {
@@ -392,6 +397,7 @@ public class TestCopyFiles extends TestCase {
     }
   }
 
+  @SuppressWarnings("deprecation")
   public void testCopyDfsToDfsUpdateOverwrite() throws Exception {
     MiniDFSCluster cluster = null;
     try {
@@ -452,6 +458,7 @@ public class TestCopyFiles extends TestCase {
     }
   }
 
+  @SuppressWarnings("deprecation")
   public void testCopyDfsToDfsUpdateWithSkipCRC() throws Exception {
     MiniDFSCluster cluster = null;
     try {
@@ -526,7 +533,8 @@ public class TestCopyFiles extends TestCase {
       if (cluster != null) { cluster.shutdown(); }
     }
   }
-    
+
+  @SuppressWarnings("deprecation")
   public void testCopyDuplication() throws Exception {
     final FileSystem localfs = FileSystem.get(LOCAL_FS, new Configuration());
     try {    
@@ -550,6 +558,7 @@ public class TestCopyFiles extends TestCase {
     }
   }
 
+  @SuppressWarnings("deprecation")
   public void testCopySingleFile() throws Exception {
     FileSystem fs = FileSystem.get(LOCAL_FS, new Configuration());
     Path root = new Path(TEST_ROOT_DIR+"/srcdat");
@@ -609,6 +618,7 @@ public class TestCopyFiles extends TestCase {
   }
 
   /** tests basedir option copying files from dfs file system to dfs file system */
+  @SuppressWarnings("deprecation")
   public void testBasedir() throws Exception {
     String namenode = null;
     MiniDFSCluster cluster = null;
@@ -634,7 +644,8 @@ public class TestCopyFiles extends TestCase {
       if (cluster != null) { cluster.shutdown(); }
     }
   }
-  
+
+  @SuppressWarnings("deprecation")
   public void testPreserveOption() throws Exception {
     Configuration conf = new Configuration();
     MiniDFSCluster cluster = null;
@@ -733,6 +744,7 @@ public class TestCopyFiles extends TestCase {
     }
   }
 
+  @SuppressWarnings("deprecation")
   public void testMapCount() throws Exception {
     String namenode = null;
     MiniDFSCluster dfs = null;
@@ -787,6 +799,7 @@ public class TestCopyFiles extends TestCase {
     }
   }
 
+  @SuppressWarnings("deprecation")
   public void testLimits() throws Exception {
     Configuration conf = new Configuration();
     MiniDFSCluster cluster = null;
@@ -892,6 +905,7 @@ public class TestCopyFiles extends TestCase {
   }
 
   /** test -delete */
+  @SuppressWarnings("deprecation")
   public void testDelete() throws Exception {
     final Configuration conf = new Configuration();
     conf.setInt("fs.trash.interval", 60);
@@ -964,6 +978,7 @@ public class TestCopyFiles extends TestCase {
   /**
    * verify that -delete option works for other {@link FileSystem}
    * implementations. See MAPREDUCE-1285 */
+  @SuppressWarnings("deprecation")
   public void testDeleteLocal() throws Exception {
     MiniDFSCluster cluster = null;
     try {
@@ -997,6 +1012,7 @@ public class TestCopyFiles extends TestCase {
   }
 
   /** test globbing  */
+  @SuppressWarnings("deprecation")
   public void testGlobbing() throws Exception {
     String namenode = null;
     MiniDFSCluster cluster = null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5c48238/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestLogalyzer.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestLogalyzer.java b/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestLogalyzer.java
index 2042604..3787f6c 100644
--- a/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestLogalyzer.java
+++ b/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestLogalyzer.java
@@ -44,6 +44,7 @@ public class TestLogalyzer {
       + File.separator + "out");
 
   @Test
+  @SuppressWarnings("deprecation")
   public void testLogalyzer() throws Exception {
     Path f = createLogFile();
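
Mechanically this is the standard Java deprecation pattern: the classes themselves are annotated @Deprecated, and the call sites that must keep compiling against them (here, the existing tests) silence the resulting warning with @SuppressWarnings("deprecation"). A minimal illustration with made-up class names rather than Hadoop code:

  /** Annotated as deprecated: javac now warns at every use site. */
  @Deprecated
  class LegacyTool {
    int run() { return 0; }
  }

  class Caller {
    // Keep using the deprecated class for now, but opt out of the warning
    // for this one method until the migration to a replacement is done.
    @SuppressWarnings("deprecation")
    int invokeLegacy() {
      return new LegacyTool().run();
    }
  }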
 


[31/50] [abbrv] hadoop git commit: Move HADOOP-11581 in CHANGES.txt from 3.0.0 to 2.8.0.

Posted by ji...@apache.org.
Move HADOOP-11581 in CHANGES.txt from 3.0.0 to 2.8.0.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eb4c9dde
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eb4c9dde
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eb4c9dde

Branch: refs/heads/HDFS-7240
Commit: eb4c9ddeb54e440d637a45d59d7c127ad9bcbaff
Parents: d4a2830
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue May 19 18:38:35 2015 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue May 19 18:38:35 2015 +0900

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb4c9dde/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 8c7c978..baf9a0f 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -424,9 +424,6 @@ Trunk (Unreleased)
     HADOOP-11559. Add links to RackAwareness and InterfaceClassification
     to site index (Masatake Iwasaki via aw)
 
-    HADOOP-11581. Multithreaded correctness Warnings
-    #org.apache.hadoop.fs.shell.Ls (Brahma Reddy Battula via aw)
-
     HADOOP-11580. Remove SingleNodeSetup.md from trunk (aajisaka)
 
     HADOOP-11583. Fix syntax error in SecureMode.md (Masatake Iwasaki via aw)
@@ -713,6 +710,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-10582. Fix the test case for copying to non-existent dir in
     TestFsShellCopy. (Kousuke Saruta via aajisaka)
 
+    HADOOP-11581. Multithreaded correctness Warnings
+    #org.apache.hadoop.fs.shell.Ls (Brahma Reddy Battula via aw)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES


[27/50] [abbrv] hadoop git commit: HDFS-6348. SecondaryNameNode not terminating properly on runtime exceptions (Contributed by Rakesh R)

Posted by ji...@apache.org.
HDFS-6348. SecondaryNameNode not terminating properly on runtime exceptions (Contributed by Rakesh R)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/93972a33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/93972a33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/93972a33

Branch: refs/heads/HDFS-7240
Commit: 93972a332a9fc6390447fc5fc9785c98fb4c3344
Parents: 0790275
Author: Vinayakumar B <vi...@apache.org>
Authored: Tue May 19 12:24:25 2015 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Tue May 19 12:24:25 2015 +0530

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  5 ++-
 .../hdfs/server/namenode/SecondaryNameNode.java | 33 ++++++++++----------
 .../hdfs/server/namenode/TestStartup.java       | 18 ++++++++++-
 3 files changed, 37 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/93972a33/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 35c3b5a..e5fcba2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1,4 +1,4 @@
-Hadoop HDFS Change Log
+ Hadoop HDFS Change Log
 
 Trunk (Unreleased)
 
@@ -788,6 +788,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8403. Eliminate retries in TestFileCreation
     #testOverwriteOpenForWrite. (Arpit Agarwal via wheat9)
 
+    HDFS-6348. SecondaryNameNode not terminating properly on runtime exceptions
+    (Rakesh R via vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/93972a33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index b499e74..0fa1cd5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -667,29 +667,28 @@ public class SecondaryNameNode implements Runnable,
       opts.usage();
       System.exit(0);
     }
-    
-    StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG);
-    Configuration tconf = new HdfsConfiguration();
-    SecondaryNameNode secondary = null;
+
     try {
+      StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG);
+      Configuration tconf = new HdfsConfiguration();
+      SecondaryNameNode secondary = null;
       secondary = new SecondaryNameNode(tconf, opts);
-    } catch (IOException ioe) {
-      LOG.fatal("Failed to start secondary namenode", ioe);
-      terminate(1);
-    }
 
-    if (opts != null && opts.getCommand() != null) {
-      int ret = secondary.processStartupCommand(opts);
-      terminate(ret);
-    }
+      if (opts != null && opts.getCommand() != null) {
+        int ret = secondary.processStartupCommand(opts);
+        terminate(ret);
+      }
 
-    if (secondary != null) {
-      secondary.startCheckpointThread();
-      secondary.join();
+      if (secondary != null) {
+        secondary.startCheckpointThread();
+        secondary.join();
+      }
+    } catch (Throwable e) {
+      LOG.fatal("Failed to start secondary namenode", e);
+      terminate(1);
     }
   }
-  
-  
+
   public void startCheckpointThread() {
     Preconditions.checkState(checkpointThread == null,
         "Should not already have a thread");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/93972a33/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
index 01621ad..4d3cb75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
@@ -60,6 +60,8 @@ import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
+import org.apache.hadoop.util.ExitUtil.ExitException;
+import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.log4j.Logger;
 import org.junit.After;
@@ -87,6 +89,8 @@ public class TestStartup {
 
   @Before
   public void setUp() throws Exception {
+    ExitUtil.disableSystemExit();
+    ExitUtil.resetFirstExitException();
     config = new HdfsConfiguration();
     hdfsDir = new File(MiniDFSCluster.getBaseDirectory());
 
@@ -403,7 +407,19 @@ public class TestStartup {
         cluster.shutdown();
     }
   }
-  
+
+  @Test(timeout = 30000)
+  public void testSNNStartupWithRuntimeException() throws Exception {
+    String[] argv = new String[] { "-checkpoint" };
+    try {
+      SecondaryNameNode.main(argv);
+      fail("Failed to handle runtime exceptions during SNN startup!");
+    } catch (ExitException ee) {
+      GenericTestUtils.assertExceptionContains("ExitException", ee);
+      assertTrue("Didn't termiated properly ", ExitUtil.terminateCalled());
+    }
+  }
+
   @Test
   public void testCompression() throws IOException {
     LOG.info("Test compressing image.");


[20/50] [abbrv] hadoop git commit: HADOOP-11944. add option to test-patch to avoid relocating patch process directory (Sean Busbey via aw)

Posted by ji...@apache.org.
HADOOP-11944. add option to test-patch to avoid relocating patch process directory (Sean Busbey via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bcc17866
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bcc17866
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bcc17866

Branch: refs/heads/HDFS-7240
Commit: bcc17866ddb616e8c70e5aa044becd7a7d1bee35
Parents: 182d86d
Author: Allen Wittenauer <aw...@apache.org>
Authored: Mon May 18 16:13:50 2015 +0000
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Mon May 18 16:13:50 2015 +0000

----------------------------------------------------------------------
 dev-support/test-patch.sh                       | 28 +++++++++++---------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 2 files changed, 18 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcc17866/dev-support/test-patch.sh
----------------------------------------------------------------------
diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index 00a638c..ae74c5b 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -38,6 +38,7 @@ function setup_defaults
   HOW_TO_CONTRIBUTE="https://wiki.apache.org/hadoop/HowToContribute"
   JENKINS=false
   BASEDIR=$(pwd)
+  RELOCATE_PATCH_DIR=false
 
   FINDBUGS_HOME=${FINDBUGS_HOME:-}
   ECLIPSE_HOME=${ECLIPSE_HOME:-}
@@ -607,6 +608,7 @@ function hadoop_usage
   echo "--eclipse-home=<path>  Eclipse home directory (default ECLIPSE_HOME environment variable)"
   echo "--jira-cmd=<cmd>       The 'jira' command to use (default 'jira')"
   echo "--jira-password=<pw>   The password for the 'jira' command"
+  echo "--mv-patch-dir         Move the patch-dir into the basedir during cleanup."
   echo "--wget-cmd=<cmd>       The 'wget' command to use (default 'wget')"
 }
 
@@ -692,6 +694,9 @@ function parse_args
       --mvn-cmd=*)
         MVN=${i#*=}
       ;;
+      --mv-patch-dir)
+        RELOCATE_PATCH_DIR=true;
+      ;;
       --offline)
         OFFLINE=true
       ;;
@@ -2323,19 +2328,16 @@ function cleanup_and_exit
 {
   local result=$1
 
-  if [[ ${JENKINS} == "true" ]] ; then
-    if [[ -e "${PATCH_DIR}" ]] ; then
-      if [[ -d "${PATCH_DIR}" ]]; then
-        # if PATCH_DIR is already inside BASEDIR, then
-        # there is no need to move it since we assume that
-        # Jenkins or whatever already knows where it is at
-        # since it told us to put it there!
-        relative_patchdir >/dev/null
-        if [[ $? == 1 ]]; then
-          hadoop_debug "mv ${PATCH_DIR} ${BASEDIR}"
-          mv "${PATCH_DIR}" "${BASEDIR}"
-        fi
-      fi
+  if [[ ${JENKINS} == "true" && ${RELOCATE_PATCH_DIR} == "true" && \
+      -e ${PATCH_DIR} && -d ${PATCH_DIR} ]] ; then
+    # if PATCH_DIR is already inside BASEDIR, then
+    # there is no need to move it since we assume that
+    # Jenkins or whatever already knows where it is at
+    # since it told us to put it there!
+    relative_patchdir >/dev/null
+    if [[ $? == 1 ]]; then
+      hadoop_debug "mv ${PATCH_DIR} ${BASEDIR}"
+      mv "${PATCH_DIR}" "${BASEDIR}"
     fi
   fi
   big_console_header "Finished build."

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcc17866/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 1c2cdaa..8f66072 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -578,6 +578,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-11884. test-patch.sh should pull the real findbugs version
     (Kengo Seki via aw)
 
+    HADOOP-11944. add option to test-patch to avoid relocating patch process
+    directory (Sean Busbey via aw)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp


[18/50] [abbrv] hadoop git commit: HDFS-8412. Fix the test failures in HTTPFS: In some tests setReplication called after fs close. Contributed by Uma Maheswara Rao G.

Posted by ji...@apache.org.
HDFS-8412. Fix the test failures in HTTPFS: In some tests setReplication called after fs close. Contributed by Uma Maheswara Rao G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a6af0248
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a6af0248
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a6af0248

Branch: refs/heads/HDFS-7240
Commit: a6af0248e9ec75e8e46ac96593070e0c9841a660
Parents: 363c355
Author: Uma Maheswara Rao G <um...@apache.org>
Authored: Mon May 18 19:35:53 2015 +0530
Committer: Uma Maheswara Rao G <um...@apache.org>
Committed: Mon May 18 19:35:53 2015 +0530

----------------------------------------------------------------------
 .../java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java  | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                        | 2 ++
 2 files changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a6af0248/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
index 2cc67d4..0e082cc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
@@ -465,8 +465,8 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     OutputStream os = fs.create(path);
     os.write(1);
     os.close();
-    fs.close();
     fs.setReplication(path, (short) 2);
+    fs.close();
 
     fs = getHttpFSFileSystem();
     fs.setReplication(path, (short) 1);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a6af0248/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3e0d360..8d0c5b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -332,6 +332,8 @@ Trunk (Unreleased)
 
     HDFS-8332. DFS client API calls should check filesystem closed (Rakesh R via umamahesh)
 
+    HDFS-8412. Fix the test failures in HTTPFS. (umamahesh)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
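
For context on the reordering above: since HDFS-8332 (listed in the same CHANGES.txt
hunk), DFS client API calls verify that the FileSystem has not been closed, so the
setReplication() call had to move ahead of fs.close(). A minimal sketch of the safe
ordering, independent of the HTTPFS test harness; the path below is illustrative.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FSDataOutputStream;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class ReplicationBeforeCloseSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();          // picks up fs.defaultFS
      FileSystem fs = FileSystem.get(conf);
      Path path = new Path("/tmp/replication-sketch.txt"); // illustrative path

      FSDataOutputStream os = fs.create(path);
      os.write(1);
      os.close();

      // Metadata calls must happen while the FileSystem handle is still open;
      // issuing them after fs.close() typically fails with "Filesystem closed".
      fs.setReplication(path, (short) 2);

      fs.close();
    }
  }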


[33/50] [abbrv] hadoop git commit: YARN-3601. Fix UT TestRMFailover.testRMWebAppRedirect. Contributed by Weiwei Yang

Posted by ji...@apache.org.
YARN-3601. Fix UT TestRMFailover.testRMWebAppRedirect. Contributed by Weiwei Yang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5009ad4a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5009ad4a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5009ad4a

Branch: refs/heads/HDFS-7240
Commit: 5009ad4a7f712fc578b461ecec53f7f97eaaed0c
Parents: de30d66
Author: Xuan <xg...@apache.org>
Authored: Tue May 19 09:56:01 2015 -0700
Committer: Xuan <xg...@apache.org>
Committed: Tue May 19 09:56:01 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  2 +
 .../hadoop/yarn/client/TestRMFailover.java      | 78 ++++++++++----------
 2 files changed, 40 insertions(+), 40 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5009ad4a/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index c6f753d..e17e9c7 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -509,6 +509,8 @@ Release 2.7.1 - UNRELEASED
     YARN-3526. ApplicationMaster tracking URL is incorrectly redirected
     on a QJM cluster. (Weiwei Yang via xgong)
 
+    YARN-3601. Fix UT TestRMFailover.testRMWebAppRedirect. (Weiwei Yang via xgong)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5009ad4a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
index d4fc5c1..cd22743 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
@@ -21,13 +21,13 @@ package org.apache.hadoop.yarn.client;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.net.HttpURLConnection;
 import java.net.URL;
-import java.util.List;
-import java.util.Map;
+import javax.servlet.http.HttpServletResponse;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -48,7 +48,6 @@ import org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
-import org.junit.Ignore;
 import org.junit.Test;
 
 public class TestRMFailover extends ClientBaseWithFixes {
@@ -275,10 +274,6 @@ public class TestRMFailover extends ClientBaseWithFixes {
     assertEquals(404, response.getResponseCode());
   }
 
-  // ignore this testcase, Always gets "too many redirect loops" exception
-  // Probably because of the limitation of MiniYARNCluster.
-  // Verified the behavior in a single node cluster.
-  @Ignore
   @Test
   public void testRMWebAppRedirect() throws YarnException,
       InterruptedException, IOException {
@@ -290,59 +285,62 @@ public class TestRMFailover extends ClientBaseWithFixes {
     getAdminService(0).transitionToActive(req);
     String rm1Url = "http://0.0.0.0:18088";
     String rm2Url = "http://0.0.0.0:28088";
-    String header = getHeader("Refresh", rm2Url);
-    assertTrue(header.contains("; url=" + rm1Url));
+    String redirectURL = getRedirectURL(rm2Url);
+    // if uri is null, RMWebAppFilter will append a slash at the end of the redirection url
+    assertEquals(redirectURL,rm1Url+"/");
 
-    header = getHeader("Refresh", rm2Url + "/metrics");
-    assertTrue(header.contains("; url=" + rm1Url));
+    redirectURL = getRedirectURL(rm2Url + "/metrics");
+    assertEquals(redirectURL,rm1Url + "/metrics");
 
-    header = getHeader("Refresh", rm2Url + "/jmx");
-    assertTrue(header.contains("; url=" + rm1Url));
+    redirectURL = getRedirectURL(rm2Url + "/jmx");
+    assertEquals(redirectURL,rm1Url + "/jmx");
 
     // standby RM links /conf, /stacks, /logLevel, /static, /logs,
     // /cluster/cluster as well as webService
     // /ws/v1/cluster/info should not be redirected to active RM
-    header = getHeader("Refresh", rm2Url + "/cluster/cluster");
-    assertEquals(null, header);
+    redirectURL = getRedirectURL(rm2Url + "/cluster/cluster");
+    assertNull(redirectURL);
 
-    header = getHeader("Refresh", rm2Url + "/conf");
-    assertEquals(null, header);
+    redirectURL = getRedirectURL(rm2Url + "/conf");
+    assertNull(redirectURL);
 
-    header = getHeader("Refresh", rm2Url + "/stacks");
-    assertEquals(null, header);
+    redirectURL = getRedirectURL(rm2Url + "/stacks");
+    assertNull(redirectURL);
 
-    header = getHeader("Refresh", rm2Url + "/logLevel");
-    assertEquals(null, header);
+    redirectURL = getRedirectURL(rm2Url + "/logLevel");
+    assertNull(redirectURL);
 
-    header = getHeader("Refresh", rm2Url + "/static");
-    assertEquals(null, header);
+    redirectURL = getRedirectURL(rm2Url + "/static");
+    assertNull(redirectURL);
 
-    header = getHeader("Refresh", rm2Url + "/logs");
-    assertEquals(null, header);
+    redirectURL = getRedirectURL(rm2Url + "/logs");
+    assertNull(redirectURL);
 
-    header = getHeader("Refresh", rm2Url + "/ws/v1/cluster/info");
-    assertEquals(null, header);
+    redirectURL = getRedirectURL(rm2Url + "/ws/v1/cluster/info");
+    assertNull(redirectURL);
 
-    header = getHeader("Refresh", rm2Url + "/ws/v1/cluster/apps");
-    assertTrue(header.contains("; url=" + rm1Url));
+    redirectURL = getRedirectURL(rm2Url + "/ws/v1/cluster/apps");
+    assertEquals(redirectURL, rm1Url + "/ws/v1/cluster/apps");
 
-    header = getHeader("Refresh", rm2Url + "/proxy/" + fakeAppId);
-    assertEquals(null, header);
-
-    // Due to the limitation of MiniYARNCluster and dispatcher is a singleton,
-    // we couldn't add the test case after explicitFailover();
+    redirectURL = getRedirectURL(rm2Url + "/proxy/" + fakeAppId);
+    assertNull(redirectURL);
   }
 
-  static String getHeader(String field, String url) {
-    String fieldHeader = null;
+  // set up http connection with the given url and get the redirection url from the response
+  // return null if the url is not redirected
+  static String getRedirectURL(String url) {
+    String redirectUrl = null;
     try {
-      Map<String, List<String>> map =
-          new URL(url).openConnection().getHeaderFields();
-      fieldHeader = map.get(field).get(0);
+      HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
+      // do not automatically follow the redirection
+      // otherwise we get too many redirections exception
+      conn.setInstanceFollowRedirects(false);
+      if(conn.getResponseCode() == HttpServletResponse.SC_TEMPORARY_REDIRECT)
+        redirectUrl = conn.getHeaderField("Location");
     } catch (Exception e) {
       // throw new RuntimeException(e);
     }
-    return fieldHeader;
+    return redirectUrl;
   }
 
 }
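
The rewritten getRedirectURL() helper above probes the standby RM with redirects
disabled and inspects the 307 response directly, which avoids the "too many redirect
loops" failure the old Refresh-header check hit. A standalone sketch of the same probe
using only the JDK; the URL in main() is illustrative.

  import java.net.HttpURLConnection;
  import java.net.URL;

  public class RedirectProbeSketch {
    // Returns the Location header if the server answers 307, or null otherwise.
    static String getRedirectTarget(String url) throws Exception {
      HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
      // Do not follow redirects automatically; we only want to inspect the response.
      conn.setInstanceFollowRedirects(false);
      try {
        if (conn.getResponseCode() == 307) {   // 307 = Temporary Redirect
          return conn.getHeaderField("Location");
        }
        return null;
      } finally {
        conn.disconnect();
      }
    }

    public static void main(String[] args) throws Exception {
      // Substitute a real standby-RM address when running.
      System.out.println(getRedirectTarget("http://0.0.0.0:28088/metrics"));
    }
  }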


[49/50] [abbrv] hadoop git commit: YARN-3681. yarn cmd says "could not find main class 'queue'" in windows. Contributed by Craig Welch and Varun Saxena

Posted by ji...@apache.org.
YARN-3681. yarn cmd says "could not find main class 'queue'" in windows.
Contributed by Craig Welch and Varun Saxena


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5774f6b1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5774f6b1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5774f6b1

Branch: refs/heads/HDFS-7240
Commit: 5774f6b1e577ee64bde8c7c1e39f404b9e651176
Parents: 03f897f
Author: Xuan <xg...@apache.org>
Authored: Wed May 20 14:21:12 2015 -0700
Committer: Xuan <xg...@apache.org>
Committed: Wed May 20 14:21:12 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt              | 3 +++
 hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5774f6b1/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d1da808..dd2d8f4 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -526,6 +526,9 @@ Release 2.7.1 - UNRELEASED
     YARN-2918. RM should not fail on startup if queue's configured labels do
     not exist in cluster-node-labels. (Wangda Tan via jianhe)
 
+    YARN-3681. yarn cmd says "could not find main class 'queue'" in windows.
+    (Craig Welch and Varun Saxena via xgong)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5774f6b1/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
index c29ee53..91c90fb 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
@@ -150,7 +150,7 @@ if "%1" == "--loglevel" (
   )
 
   set yarncommands=resourcemanager nodemanager proxyserver rmadmin version jar ^
-     application applicationattempt container node logs daemonlog historyserver ^
+     application applicationattempt container node queue logs daemonlog historyserver ^
      timelineserver classpath
   for %%i in ( %yarncommands% ) do (
     if %yarn-command% == %%i set yarncommand=true


[04/50] [abbrv] hadoop git commit: HADOOP-11960. Enable Azure-Storage Client Side logging. Contributed by Dushyanth.

Posted by ji...@apache.org.
HADOOP-11960. Enable Azure-Storage Client Side logging. Contributed by Dushyanth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb8e69a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb8e69a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb8e69a8

Branch: refs/heads/HDFS-7240
Commit: cb8e69a80cecb95abdfc93a787bea0bedef275ed
Parents: ee7beda
Author: cnauroth <cn...@apache.org>
Authored: Thu May 14 22:22:24 2015 -0700
Committer: cnauroth <cn...@apache.org>
Committed: Thu May 14 22:22:24 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../fs/azure/AzureNativeFileSystemStore.java    |   5 +
 .../TestNativeAzureFileSystemClientLogging.java | 130 +++++++++++++++++++
 3 files changed, 138 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb8e69a8/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 2f8acb0..aecfde4 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -569,6 +569,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-11713. ViewFileSystem should support snapshot methods.
     (Rakesh R via cnauroth)
 
+    HADOOP-11960. Enable Azure-Storage Client Side logging.
+    (Dushyanth via cnauroth)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb8e69a8/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index 5dc0963..3267d8b 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -148,6 +148,8 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
   private static final String KEY_SELF_THROTTLE_READ_FACTOR = "fs.azure.selfthrottling.read.factor";
   private static final String KEY_SELF_THROTTLE_WRITE_FACTOR = "fs.azure.selfthrottling.write.factor";
 
+  private static final String KEY_ENABLE_STORAGE_CLIENT_LOGGING = "fs.azure.storage.client.logging";
+
   private static final String PERMISSION_METADATA_KEY = "hdi_permission";
   private static final String OLD_PERMISSION_METADATA_KEY = "asv_permission";
   private static final String IS_FOLDER_METADATA_KEY = "hdi_isfolder";
@@ -681,6 +683,9 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
     selfThrottlingWriteFactor = sessionConfiguration.getFloat(
         KEY_SELF_THROTTLE_WRITE_FACTOR, DEFAULT_SELF_THROTTLE_WRITE_FACTOR);
 
+    OperationContext.setLoggingEnabledByDefault(sessionConfiguration.
+        getBoolean(KEY_ENABLE_STORAGE_CLIENT_LOGGING, false));
+
     if (LOG.isDebugEnabled()) {
       LOG.debug(String
           .format(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb8e69a8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemClientLogging.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemClientLogging.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemClientLogging.java
new file mode 100644
index 0000000..da39fa3
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemClientLogging.java
@@ -0,0 +1,130 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.net.URI;
+import java.util.StringTokenizer;
+
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.apache.log4j.Logger;
+import org.junit.Test;
+
+/**
+ * Test to validate Azure storage client side logging. Tests work only when
+ * testing with Live Azure storage because Emulator does not have support for
+ * client-side logging.
+ *
+ */
+public class TestNativeAzureFileSystemClientLogging extends
+    NativeAzureFileSystemBaseTest {
+
+  private AzureBlobStorageTestAccount testAccount;
+
+  // Core-site config controlling Azure Storage Client logging
+  private static final String KEY_LOGGING_CONF_STRING = "fs.azure.storage.client.logging";
+
+  // Temporary directory created using WASB.
+  private static final String TEMP_DIR = "tempDir";
+
+  /*
+   * Helper method to verify that the client logging is working. It primarily
+   * checks that a line corresponding to the entity created during the test run
+   * shows up in the captured logs.
+   */
+  private boolean verifyStorageClientLogs(String capturedLogs, String entity)
+      throws Exception {
+
+    URI uri = testAccount.getRealAccount().getBlobEndpoint();
+    String container = testAccount.getRealContainer().getName();
+    String validateString = uri + Path.SEPARATOR + container + Path.SEPARATOR
+        + entity;
+    boolean entityFound = false;
+
+    StringTokenizer tokenizer = new StringTokenizer(capturedLogs, "\n");
+
+    while (tokenizer.hasMoreTokens()) {
+      String token = tokenizer.nextToken();
+      if (token.contains(validateString)) {
+        entityFound = true;
+        break;
+      }
+    }
+    return entityFound;
+  }
+
+  /*
+   * Helper method that updates the core-site config to enable/disable logging.
+   */
+  private void updateFileSystemConfiguration(Boolean loggingFlag)
+      throws Exception {
+
+    Configuration conf = fs.getConf();
+    conf.set(KEY_LOGGING_CONF_STRING, loggingFlag.toString());
+    URI uri = fs.getUri();
+    fs.initialize(uri, conf);
+  }
+
+  // Using WASB code to communicate with Azure Storage.
+  private void performWASBOperations() throws Exception {
+
+    Path tempDir = new Path(Path.SEPARATOR + TEMP_DIR);
+    fs.mkdirs(tempDir);
+    fs.delete(tempDir, true);
+  }
+
+  @Test
+  public void testLoggingEnabled() throws Exception {
+
+    LogCapturer logs = LogCapturer.captureLogs(new Log4JLogger(Logger
+        .getRootLogger()));
+
+    // Update configuration based on the Test.
+    updateFileSystemConfiguration(true);
+
+    performWASBOperations();
+
+    assertTrue(verifyStorageClientLogs(logs.getOutput(), TEMP_DIR));
+  }
+
+  @Test
+  public void testLoggingDisabled() throws Exception {
+
+    LogCapturer logs = LogCapturer.captureLogs(new Log4JLogger(Logger
+        .getRootLogger()));
+
+    // Update configuration based on the Test.
+    updateFileSystemConfiguration(false);
+
+    performWASBOperations();
+
+    assertFalse(verifyStorageClientLogs(logs.getOutput(), TEMP_DIR));
+  }
+
+  @Override
+  protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+    testAccount = AzureBlobStorageTestAccount.create();
+    return testAccount;
+  }
+}
\ No newline at end of file
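
For reference, the new switch is driven entirely by the boolean key
fs.azure.storage.client.logging, which the store forwards to the Azure storage SDK's
OperationContext before the client is created. A sketch of enabling it from client
code is below; the WASB URI and path are illustrative, and the setting only takes
effect when the hadoop-azure connector and its storage SDK are on the classpath.

  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class AzureClientLoggingSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // Key introduced by this patch; the store reads it with a default of false.
      conf.setBoolean("fs.azure.storage.client.logging", true);

      // Illustrative container/account; substitute real values when running.
      FileSystem fs = FileSystem.get(
          new URI("wasb://container@account.blob.core.windows.net/"), conf);

      // Each storage round trip below is now logged by the Azure storage client,
      // surfacing through the configured log4j root logger.
      fs.mkdirs(new Path("/tempDir"));
      fs.delete(new Path("/tempDir"), true);
      fs.close();
    }
  }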