Posted to common-commits@hadoop.apache.org by zj...@apache.org on 2015/06/08 19:18:59 UTC

[01/50] hadoop git commit: HDFS-8513. Rename BlockPlacementPolicyRackFaultTolarent to BlockPlacementPolicyRackFaultTolerant. (wang)

Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 d88f30ba5 -> 0a3c14782


HDFS-8513. Rename BlockPlacementPolicyRackFaultTolarent to BlockPlacementPolicyRackFaultTolerant. (wang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0f407fc8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0f407fc8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0f407fc8

Branch: refs/heads/YARN-2928
Commit: 0f407fc80e228027e5eb01c620d3dd8b47004335
Parents: bd224ca
Author: Andrew Wang <wa...@apache.org>
Authored: Tue Jun 2 15:48:26 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:43:11 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../BlockPlacementPolicyRackFaultTolarent.java  | 154 --------------
 .../BlockPlacementPolicyRackFaultTolerant.java  | 154 ++++++++++++++
 ...stBlockPlacementPolicyRackFaultTolarent.java | 209 -------------------
 ...stBlockPlacementPolicyRackFaultTolerant.java | 209 +++++++++++++++++++
 5 files changed, 366 insertions(+), 363 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f407fc8/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9d427ff..2ce54c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -594,6 +594,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8386. Improve synchronization of 'streamer' reference in
     DFSOutputStream. (Rakesh R via wang)
 
+    HDFS-8513. Rename BlockPlacementPolicyRackFaultTolarent to
+    BlockPlacementPolicyRackFaultTolerant. (wang)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f407fc8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolarent.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolarent.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolarent.java
deleted file mode 100644
index 4dbf384..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolarent.java
+++ /dev/null
@@ -1,154 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.blockmanagement;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.net.Node;
-import org.apache.hadoop.net.NodeBase;
-
-import java.util.*;
-
-/**
- * This class is responsible for choosing the desired number of targets
- * for placing block replicas.
- * The strategy is to spread the replicas across as many racks as possible.
- */
-@InterfaceAudience.Private
-public class BlockPlacementPolicyRackFaultTolarent extends BlockPlacementPolicyDefault {
-
-  @Override
-  protected int[] getMaxNodesPerRack(int numOfChosen, int numOfReplicas) {
-    int clusterSize = clusterMap.getNumOfLeaves();
-    int totalNumOfReplicas = numOfChosen + numOfReplicas;
-    if (totalNumOfReplicas > clusterSize) {
-      numOfReplicas -= (totalNumOfReplicas-clusterSize);
-      totalNumOfReplicas = clusterSize;
-    }
-    // No calculation needed when there is only one rack or picking one node.
-    int numOfRacks = clusterMap.getNumOfRacks();
-    if (numOfRacks == 1 || totalNumOfReplicas <= 1) {
-      return new int[] {numOfReplicas, totalNumOfReplicas};
-    }
-    if (totalNumOfReplicas < numOfRacks) {
-      return new int[] {numOfReplicas, 1};
-    }
-    int maxNodesPerRack = (totalNumOfReplicas - 1) / numOfRacks + 1;
-    return new int[] {numOfReplicas, maxNodesPerRack};
-  }
-
-  /**
-   * Choose numOfReplicas targets in order:
-   * 1. If the total number of expected replicas is less than numOfRacks in
-   * the cluster, choose nodes randomly.
-   * 2. If the total number of expected replicas is bigger than numOfRacks:
-   *  2a. Fill each rack with exactly (maxNodesPerRack-1) replicas.
-   *  2b. For some random racks, place one more replica on each of them, until
-   *  numOfReplicas have been chosen. <br>
-   * In the end, the replica counts of any two racks differ by at most 1.
-   * Either way, the first replica always prefers local storage.
-   * @return the local node of the writer
-   */
-  @Override
-  protected Node chooseTargetInOrder(int numOfReplicas,
-                                 Node writer,
-                                 final Set<Node> excludedNodes,
-                                 final long blocksize,
-                                 final int maxNodesPerRack,
-                                 final List<DatanodeStorageInfo> results,
-                                 final boolean avoidStaleNodes,
-                                 final boolean newBlock,
-                                 EnumMap<StorageType, Integer> storageTypes)
-                                 throws NotEnoughReplicasException {
-    int totalReplicaExpected = results.size() + numOfReplicas;
-    int numOfRacks = clusterMap.getNumOfRacks();
-    if (totalReplicaExpected < numOfRacks ||
-        totalReplicaExpected % numOfRacks == 0) {
-      writer = chooseOnce(numOfReplicas, writer, excludedNodes, blocksize,
-          maxNodesPerRack, results, avoidStaleNodes, storageTypes);
-      return writer;
-    }
-
-    assert totalReplicaExpected > (maxNodesPerRack -1) * numOfRacks;
-
-    // Calculate numOfReplicas for filling each rack exactly (maxNodesPerRack-1)
-    // replicas.
-    HashMap<String, Integer> rackCounts = new HashMap<>();
-    for (DatanodeStorageInfo dsInfo : results) {
-      String rack = dsInfo.getDatanodeDescriptor().getNetworkLocation();
-      Integer count = rackCounts.get(rack);
-      if (count != null) {
-        rackCounts.put(rack, count + 1);
-      } else {
-        rackCounts.put(rack, 1);
-      }
-    }
-    int excess = 0; // Number of nodes in results beyond the (maxNodesPerRack-1) per-rack quota
-    for (int count : rackCounts.values()) {
-      if (count > maxNodesPerRack -1) {
-        excess += count - (maxNodesPerRack -1);
-      }
-    }
-    numOfReplicas = Math.min(totalReplicaExpected - results.size(),
-        (maxNodesPerRack -1) * numOfRacks - (results.size() - excess));
-
-    // Fill each rack exactly (maxNodesPerRack-1) replicas.
-    writer = chooseOnce(numOfReplicas, writer, new HashSet<>(excludedNodes),
-        blocksize, maxNodesPerRack -1, results, avoidStaleNodes, storageTypes);
-
-    for (DatanodeStorageInfo resultStorage : results) {
-      addToExcludedNodes(resultStorage.getDatanodeDescriptor(), excludedNodes);
-    }
-
-    // For some racks, place one more replica to each one of them.
-    numOfReplicas = totalReplicaExpected - results.size();
-    chooseOnce(numOfReplicas, writer, excludedNodes, blocksize,
-        maxNodesPerRack, results, avoidStaleNodes, storageTypes);
-
-    return writer;
-  }
-
-  /**
-   * Randomly choose <i>numOfReplicas</i> targets from the given <i>scope</i>,
-   * except that the first replica prefers local storage.
-   * @return the local node of the writer.
-   */
-  private Node chooseOnce(int numOfReplicas,
-                            Node writer,
-                            final Set<Node> excludedNodes,
-                            final long blocksize,
-                            final int maxNodesPerRack,
-                            final List<DatanodeStorageInfo> results,
-                            final boolean avoidStaleNodes,
-                            EnumMap<StorageType, Integer> storageTypes)
-                            throws NotEnoughReplicasException {
-    if (numOfReplicas == 0) {
-      return writer;
-    }
-    writer = chooseLocalStorage(writer, excludedNodes, blocksize,
-        maxNodesPerRack, results, avoidStaleNodes, storageTypes, true)
-        .getDatanodeDescriptor();
-    if (--numOfReplicas == 0) {
-      return writer;
-    }
-    chooseRandom(numOfReplicas, NodeBase.ROOT, excludedNodes, blocksize,
-        maxNodesPerRack, results, avoidStaleNodes, storageTypes);
-    return writer;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f407fc8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java
new file mode 100644
index 0000000..f25fb15
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java
@@ -0,0 +1,154 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.net.Node;
+import org.apache.hadoop.net.NodeBase;
+
+import java.util.*;
+
+/**
+ * This class is responsible for choosing the desired number of targets
+ * for placing block replicas.
+ * The strategy is to spread the replicas across as many racks as possible.
+ */
+@InterfaceAudience.Private
+public class BlockPlacementPolicyRackFaultTolerant extends BlockPlacementPolicyDefault {
+
+  @Override
+  protected int[] getMaxNodesPerRack(int numOfChosen, int numOfReplicas) {
+    int clusterSize = clusterMap.getNumOfLeaves();
+    int totalNumOfReplicas = numOfChosen + numOfReplicas;
+    if (totalNumOfReplicas > clusterSize) {
+      numOfReplicas -= (totalNumOfReplicas-clusterSize);
+      totalNumOfReplicas = clusterSize;
+    }
+    // No calculation needed when there is only one rack or picking one node.
+    int numOfRacks = clusterMap.getNumOfRacks();
+    if (numOfRacks == 1 || totalNumOfReplicas <= 1) {
+      return new int[] {numOfReplicas, totalNumOfReplicas};
+    }
+    if (totalNumOfReplicas < numOfRacks) {
+      return new int[] {numOfReplicas, 1};
+    }
+    int maxNodesPerRack = (totalNumOfReplicas - 1) / numOfRacks + 1;
+    return new int[] {numOfReplicas, maxNodesPerRack};
+  }
+
+  /**
+   * Choose numOfReplicas targets in order:
+   * 1. If the total number of expected replicas is less than numOfRacks in
+   * the cluster, choose nodes randomly.
+   * 2. If the total number of expected replicas is bigger than numOfRacks:
+   *  2a. Fill each rack with exactly (maxNodesPerRack-1) replicas.
+   *  2b. For some random racks, place one more replica on each of them, until
+   *  numOfReplicas have been chosen. <br>
+   * In the end, the replica counts of any two racks differ by at most 1.
+   * Either way, the first replica always prefers local storage.
+   * @return the local node of the writer
+   */
+  @Override
+  protected Node chooseTargetInOrder(int numOfReplicas,
+                                 Node writer,
+                                 final Set<Node> excludedNodes,
+                                 final long blocksize,
+                                 final int maxNodesPerRack,
+                                 final List<DatanodeStorageInfo> results,
+                                 final boolean avoidStaleNodes,
+                                 final boolean newBlock,
+                                 EnumMap<StorageType, Integer> storageTypes)
+                                 throws NotEnoughReplicasException {
+    int totalReplicaExpected = results.size() + numOfReplicas;
+    int numOfRacks = clusterMap.getNumOfRacks();
+    if (totalReplicaExpected < numOfRacks ||
+        totalReplicaExpected % numOfRacks == 0) {
+      writer = chooseOnce(numOfReplicas, writer, excludedNodes, blocksize,
+          maxNodesPerRack, results, avoidStaleNodes, storageTypes);
+      return writer;
+    }
+
+    assert totalReplicaExpected > (maxNodesPerRack -1) * numOfRacks;
+
+    // Calculate numOfReplicas for filling each rack exactly (maxNodesPerRack-1)
+    // replicas.
+    HashMap<String, Integer> rackCounts = new HashMap<>();
+    for (DatanodeStorageInfo dsInfo : results) {
+      String rack = dsInfo.getDatanodeDescriptor().getNetworkLocation();
+      Integer count = rackCounts.get(rack);
+      if (count != null) {
+        rackCounts.put(rack, count + 1);
+      } else {
+        rackCounts.put(rack, 1);
+      }
+    }
+    int excess = 0; // Number of nodes in results beyond the (maxNodesPerRack-1) per-rack quota
+    for (int count : rackCounts.values()) {
+      if (count > maxNodesPerRack -1) {
+        excess += count - (maxNodesPerRack -1);
+      }
+    }
+    numOfReplicas = Math.min(totalReplicaExpected - results.size(),
+        (maxNodesPerRack -1) * numOfRacks - (results.size() - excess));
+
+    // Fill each rack exactly (maxNodesPerRack-1) replicas.
+    writer = chooseOnce(numOfReplicas, writer, new HashSet<>(excludedNodes),
+        blocksize, maxNodesPerRack -1, results, avoidStaleNodes, storageTypes);
+
+    for (DatanodeStorageInfo resultStorage : results) {
+      addToExcludedNodes(resultStorage.getDatanodeDescriptor(), excludedNodes);
+    }
+
+    // For some racks, place one more replica to each one of them.
+    numOfReplicas = totalReplicaExpected - results.size();
+    chooseOnce(numOfReplicas, writer, excludedNodes, blocksize,
+        maxNodesPerRack, results, avoidStaleNodes, storageTypes);
+
+    return writer;
+  }
+
+  /**
+   * Randomly choose <i>numOfReplicas</i> targets from the given <i>scope</i>,
+   * except that the first replica prefers local storage.
+   * @return the local node of the writer.
+   */
+  private Node chooseOnce(int numOfReplicas,
+                            Node writer,
+                            final Set<Node> excludedNodes,
+                            final long blocksize,
+                            final int maxNodesPerRack,
+                            final List<DatanodeStorageInfo> results,
+                            final boolean avoidStaleNodes,
+                            EnumMap<StorageType, Integer> storageTypes)
+                            throws NotEnoughReplicasException {
+    if (numOfReplicas == 0) {
+      return writer;
+    }
+    writer = chooseLocalStorage(writer, excludedNodes, blocksize,
+        maxNodesPerRack, results, avoidStaleNodes, storageTypes, true)
+        .getDatanodeDescriptor();
+    if (--numOfReplicas == 0) {
+      return writer;
+    }
+    chooseRandom(numOfReplicas, NodeBase.ROOT, excludedNodes, blocksize,
+        maxNodesPerRack, results, avoidStaleNodes, storageTypes);
+    return writer;
+  }
+}
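
The heart of this policy is the per-rack cap computed in getMaxNodesPerRack
above: a ceiling division that guarantees the replica counts of any two racks
differ by at most one. The following standalone sketch (a hypothetical demo
class, not part of this commit) walks through that arithmetic:

public class RackCapDemo {
  // Mirrors the getMaxNodesPerRack logic above: ceiling(total / racks),
  // computed with the (n - 1) / d + 1 idiom, assuming numRacks >= 1.
  static int maxNodesPerRack(int totalReplicas, int numRacks) {
    if (totalReplicas < numRacks) {
      return 1; // fewer replicas than racks: at most one replica per rack
    }
    return (totalReplicas - 1) / numRacks + 1;
  }

  public static void main(String[] args) {
    System.out.println(maxNodesPerRack(20, 10)); // 2: every rack filled evenly
    System.out.println(maxNodesPerRack(11, 10)); // 2: one rack gets a second replica
    System.out.println(maxNodesPerRack(3, 10));  // 1: spread across three racks
  }
}

With 11 replicas on 10 racks, chooseTargetInOrder first fills every rack up to
the (maxNodesPerRack - 1) = 1 quota and then places the single remaining
replica on a random rack, so no rack ends up two replicas ahead of another.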

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f407fc8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolarent.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolarent.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolarent.java
deleted file mode 100644
index d86a267..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolarent.java
+++ /dev/null
@@ -1,209 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CreateFlag;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.permission.PermissionStatus;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolarent;
-import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
-import org.apache.hadoop.net.StaticMapping;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.*;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-public class TestBlockPlacementPolicyRackFaultTolarent {
-
-  private static final int DEFAULT_BLOCK_SIZE = 1024;
-  private MiniDFSCluster cluster = null;
-  private NamenodeProtocols nameNodeRpc = null;
-  private FSNamesystem namesystem = null;
-  private PermissionStatus perm = null;
-
-  @Before
-  public void setup() throws IOException {
-    StaticMapping.resetMap();
-    Configuration conf = new HdfsConfiguration();
-    final ArrayList<String> rackList = new ArrayList<String>();
-    final ArrayList<String> hostList = new ArrayList<String>();
-    for (int i = 0; i < 10; i++) {
-      for (int j = 0; j < 2; j++) {
-        rackList.add("/rack" + i);
-        hostList.add("/host" + i + j);
-      }
-    }
-    conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
-        BlockPlacementPolicyRackFaultTolarent.class,
-        BlockPlacementPolicy.class);
-    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
-    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE / 2);
-    cluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(hostList.size())
-        .racks(rackList.toArray(new String[rackList.size()]))
-        .hosts(hostList.toArray(new String[hostList.size()]))
-        .build();
-    cluster.waitActive();
-    nameNodeRpc = cluster.getNameNodeRpc();
-    namesystem = cluster.getNamesystem();
-    perm = new PermissionStatus("TestBlockPlacementPolicyEC", null,
-        FsPermission.getDefault());
-  }
-
-  @After
-  public void teardown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testChooseTarget() throws Exception {
-    doTestChooseTargetNormalCase();
-    doTestChooseTargetSpecialCase();
-  }
-
-  private void doTestChooseTargetNormalCase() throws Exception {
-    String clientMachine = "client.foo.com";
-    short[][] testSuite = {
-        {3, 2}, {3, 7}, {3, 8}, {3, 10}, {9, 1}, {10, 1}, {10, 6}, {11, 6},
-        {11, 9}
-    };
-    // Run the whole test suite 5 times
-    int fileCount = 0;
-    for (int i = 0; i < 5; i++) {
-      for (short[] testCase : testSuite) {
-        short replication = testCase[0];
-        short additionalReplication = testCase[1];
-        String src = "/testfile" + (fileCount++);
-        // Create the file with client machine
-        HdfsFileStatus fileStatus = namesystem.startFile(src, perm,
-            clientMachine, clientMachine, EnumSet.of(CreateFlag.CREATE), true,
-            replication, DEFAULT_BLOCK_SIZE, null, false);
-
-        //test chooseTarget for new file
-        LocatedBlock locatedBlock = nameNodeRpc.addBlock(src, clientMachine,
-            null, null, fileStatus.getFileId(), null);
-        doTestLocatedBlock(replication, locatedBlock);
-
-        //test chooseTarget for existing file.
-        LocatedBlock additionalLocatedBlock =
-            nameNodeRpc.getAdditionalDatanode(src, fileStatus.getFileId(),
-                locatedBlock.getBlock(), locatedBlock.getLocations(),
-                locatedBlock.getStorageIDs(), new DatanodeInfo[0],
-                additionalReplication, clientMachine);
-        doTestLocatedBlock(replication + additionalReplication, additionalLocatedBlock);
-      }
-    }
-  }
-
-  /**
-   * Test with more randomness, so that special cases are covered.
-   * For example, when some racks already have 2 replicas while others have
-   * none, the racks with none should be chosen.
-   */
-  private void doTestChooseTargetSpecialCase() throws Exception {
-    String clientMachine = "client.foo.com";
-    // Create one file with a replication factor of 20
-    String src = "/testfile_1_";
-    // Create the file with client machine
-    HdfsFileStatus fileStatus = namesystem.startFile(src, perm,
-        clientMachine, clientMachine, EnumSet.of(CreateFlag.CREATE), true,
-        (short) 20, DEFAULT_BLOCK_SIZE, null, false);
-
-    //test chooseTarget for new file
-    LocatedBlock locatedBlock = nameNodeRpc.addBlock(src, clientMachine,
-        null, null, fileStatus.getFileId(), null);
-    doTestLocatedBlock(20, locatedBlock);
-
-    DatanodeInfo[] locs = locatedBlock.getLocations();
-    String[] storageIDs = locatedBlock.getStorageIDs();
-
-    for (int time = 0; time < 5; time++) {
-      shuffle(locs, storageIDs);
-      for (int i = 1; i < locs.length; i++) {
-        DatanodeInfo[] partLocs = new DatanodeInfo[i];
-        String[] partStorageIDs = new String[i];
-        System.arraycopy(locs, 0, partLocs, 0, i);
-        System.arraycopy(storageIDs, 0, partStorageIDs, 0, i);
-        for (int j = 1; j < 20 - i; j++) {
-          LocatedBlock additionalLocatedBlock =
-              nameNodeRpc.getAdditionalDatanode(src, fileStatus.getFileId(),
-                  locatedBlock.getBlock(), partLocs,
-                  partStorageIDs, new DatanodeInfo[0],
-                  j, clientMachine);
-          doTestLocatedBlock(i + j, additionalLocatedBlock);
-        }
-      }
-    }
-  }
-
-  private void shuffle(DatanodeInfo[] locs, String[] storageIDs) {
-    int length = locs.length;
-    Object[][] pairs = new Object[length][];
-    for (int i = 0; i < length; i++) {
-      pairs[i] = new Object[]{locs[i], storageIDs[i]};
-    }
-    DFSUtil.shuffle(pairs);
-    for (int i = 0; i < length; i++) {
-      locs[i] = (DatanodeInfo) pairs[i][0];
-      storageIDs[i] = (String) pairs[i][1];
-    }
-  }
-
-  private void doTestLocatedBlock(int replication, LocatedBlock locatedBlock) {
-    assertEquals(replication, locatedBlock.getLocations().length);
-
-    HashMap<String, Integer> racksCount = new HashMap<String, Integer>();
-    for (DatanodeInfo node :
-        locatedBlock.getLocations()) {
-      addToRacksCount(node.getNetworkLocation(), racksCount);
-    }
-
-    int minCount = Integer.MAX_VALUE;
-    int maxCount = Integer.MIN_VALUE;
-    for (Integer rackCount : racksCount.values()) {
-      minCount = Math.min(minCount, rackCount);
-      maxCount = Math.max(maxCount, rackCount);
-    }
-    assertTrue(maxCount - minCount <= 1);
-  }
-
-  private void addToRacksCount(String rack, HashMap<String, Integer> racksCount) {
-    Integer count = racksCount.get(rack);
-    if (count == null) {
-      racksCount.put(rack, 1);
-    } else {
-      racksCount.put(rack, count + 1);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f407fc8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java
new file mode 100644
index 0000000..ca9da77
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java
@@ -0,0 +1,209 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.net.StaticMapping;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.*;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class TestBlockPlacementPolicyRackFaultTolerant {
+
+  private static final int DEFAULT_BLOCK_SIZE = 1024;
+  private MiniDFSCluster cluster = null;
+  private NamenodeProtocols nameNodeRpc = null;
+  private FSNamesystem namesystem = null;
+  private PermissionStatus perm = null;
+
+  @Before
+  public void setup() throws IOException {
+    StaticMapping.resetMap();
+    Configuration conf = new HdfsConfiguration();
+    final ArrayList<String> rackList = new ArrayList<String>();
+    final ArrayList<String> hostList = new ArrayList<String>();
+    for (int i = 0; i < 10; i++) {
+      for (int j = 0; j < 2; j++) {
+        rackList.add("/rack" + i);
+        hostList.add("/host" + i + j);
+      }
+    }
+    conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
+        BlockPlacementPolicyRackFaultTolerant.class,
+        BlockPlacementPolicy.class);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE / 2);
+    cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(hostList.size())
+        .racks(rackList.toArray(new String[rackList.size()]))
+        .hosts(hostList.toArray(new String[hostList.size()]))
+        .build();
+    cluster.waitActive();
+    nameNodeRpc = cluster.getNameNodeRpc();
+    namesystem = cluster.getNamesystem();
+    perm = new PermissionStatus("TestBlockPlacementPolicyEC", null,
+        FsPermission.getDefault());
+  }
+
+  @After
+  public void teardown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testChooseTarget() throws Exception {
+    doTestChooseTargetNormalCase();
+    doTestChooseTargetSpecialCase();
+  }
+
+  private void doTestChooseTargetNormalCase() throws Exception {
+    String clientMachine = "client.foo.com";
+    short[][] testSuite = {
+        {3, 2}, {3, 7}, {3, 8}, {3, 10}, {9, 1}, {10, 1}, {10, 6}, {11, 6},
+        {11, 9}
+    };
+    // Run the whole test suite 5 times
+    int fileCount = 0;
+    for (int i = 0; i < 5; i++) {
+      for (short[] testCase : testSuite) {
+        short replication = testCase[0];
+        short additionalReplication = testCase[1];
+        String src = "/testfile" + (fileCount++);
+        // Create the file with client machine
+        HdfsFileStatus fileStatus = namesystem.startFile(src, perm,
+            clientMachine, clientMachine, EnumSet.of(CreateFlag.CREATE), true,
+            replication, DEFAULT_BLOCK_SIZE, null, false);
+
+        //test chooseTarget for new file
+        LocatedBlock locatedBlock = nameNodeRpc.addBlock(src, clientMachine,
+            null, null, fileStatus.getFileId(), null);
+        doTestLocatedBlock(replication, locatedBlock);
+
+        //test chooseTarget for existing file.
+        LocatedBlock additionalLocatedBlock =
+            nameNodeRpc.getAdditionalDatanode(src, fileStatus.getFileId(),
+                locatedBlock.getBlock(), locatedBlock.getLocations(),
+                locatedBlock.getStorageIDs(), new DatanodeInfo[0],
+                additionalReplication, clientMachine);
+        doTestLocatedBlock(replication + additionalReplication, additionalLocatedBlock);
+      }
+    }
+  }
+
+  /**
+   * Test with more randomness, so that special cases are covered.
+   * For example, when some racks already have 2 replicas while others have
+   * none, the racks with none should be chosen.
+   */
+  private void doTestChooseTargetSpecialCase() throws Exception {
+    String clientMachine = "client.foo.com";
+    // Create one file with a replication factor of 20
+    String src = "/testfile_1_";
+    // Create the file with client machine
+    HdfsFileStatus fileStatus = namesystem.startFile(src, perm,
+        clientMachine, clientMachine, EnumSet.of(CreateFlag.CREATE), true,
+        (short) 20, DEFAULT_BLOCK_SIZE, null, false);
+
+    //test chooseTarget for new file
+    LocatedBlock locatedBlock = nameNodeRpc.addBlock(src, clientMachine,
+        null, null, fileStatus.getFileId(), null);
+    doTestLocatedBlock(20, locatedBlock);
+
+    DatanodeInfo[] locs = locatedBlock.getLocations();
+    String[] storageIDs = locatedBlock.getStorageIDs();
+
+    for (int time = 0; time < 5; time++) {
+      shuffle(locs, storageIDs);
+      for (int i = 1; i < locs.length; i++) {
+        DatanodeInfo[] partLocs = new DatanodeInfo[i];
+        String[] partStorageIDs = new String[i];
+        System.arraycopy(locs, 0, partLocs, 0, i);
+        System.arraycopy(storageIDs, 0, partStorageIDs, 0, i);
+        for (int j = 1; j < 20 - i; j++) {
+          LocatedBlock additionalLocatedBlock =
+              nameNodeRpc.getAdditionalDatanode(src, fileStatus.getFileId(),
+                  locatedBlock.getBlock(), partLocs,
+                  partStorageIDs, new DatanodeInfo[0],
+                  j, clientMachine);
+          doTestLocatedBlock(i + j, additionalLocatedBlock);
+        }
+      }
+    }
+  }
+
+  private void shuffle(DatanodeInfo[] locs, String[] storageIDs) {
+    int length = locs.length;
+    Object[][] pairs = new Object[length][];
+    for (int i = 0; i < length; i++) {
+      pairs[i] = new Object[]{locs[i], storageIDs[i]};
+    }
+    DFSUtil.shuffle(pairs);
+    for (int i = 0; i < length; i++) {
+      locs[i] = (DatanodeInfo) pairs[i][0];
+      storageIDs[i] = (String) pairs[i][1];
+    }
+  }
+
+  private void doTestLocatedBlock(int replication, LocatedBlock locatedBlock) {
+    assertEquals(replication, locatedBlock.getLocations().length);
+
+    HashMap<String, Integer> racksCount = new HashMap<String, Integer>();
+    for (DatanodeInfo node :
+        locatedBlock.getLocations()) {
+      addToRacksCount(node.getNetworkLocation(), racksCount);
+    }
+
+    int minCount = Integer.MAX_VALUE;
+    int maxCount = Integer.MIN_VALUE;
+    for (Integer rackCount : racksCount.values()) {
+      minCount = Math.min(minCount, rackCount);
+      maxCount = Math.max(maxCount, rackCount);
+    }
+    assertTrue(maxCount - minCount <= 1);
+  }
+
+  private void addToRacksCount(String rack, HashMap<String, Integer> racksCount) {
+    Integer count = racksCount.get(rack);
+    if (count == null) {
+      racksCount.put(rack, 1);
+    } else {
+      racksCount.put(rack, count + 1);
+    }
+  }
+}


[27/50] hadoop git commit: HADOOP-11994. smart-apply-patch wrongly assumes that git is infallible. (Contributed by Kengo Seki)

Posted by zj...@apache.org.
HADOOP-11994. smart-apply-patch wrongly assumes that git is infallible. (Contributed by Kengo Seki)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e72a346e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e72a346e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e72a346e

Branch: refs/heads/YARN-2928
Commit: e72a346e1ea6565c4f87c6ed0afd33fa09e1c8da
Parents: 94db4f2
Author: Arpit Agarwal <ar...@apache.org>
Authored: Thu Jun 4 10:53:16 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:56:57 2015 -0700

----------------------------------------------------------------------
 dev-support/smart-apply-patch.sh                | 5 ++++-
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
 2 files changed, 7 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e72a346e/dev-support/smart-apply-patch.sh
----------------------------------------------------------------------
diff --git a/dev-support/smart-apply-patch.sh b/dev-support/smart-apply-patch.sh
index be29c47..ebcb660 100755
--- a/dev-support/smart-apply-patch.sh
+++ b/dev-support/smart-apply-patch.sh
@@ -101,7 +101,10 @@ if grep -q "^diff --git" "${PATCH_FILE}"; then
   fi
   # shellcheck disable=SC2086
   git apply ${GIT_FLAGS} "${PATCH_FILE}"
-  exit $?
+  if [[ $? == 0 ]]; then
+    cleanup 0
+  fi
+  echo "git apply failed. Going to apply the patch with: ${PATCH}"
 fi
 
 # Come up with a list of changed files into $TMP
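
For readers following the control flow: the script used to exit with git's
status right after git apply, so a git failure aborted the whole run; now a
zero exit code triggers cleanup, while a failure falls through to the plain
patch path. The same pattern, sketched in Java for consistency with the other
examples here (command names and flags are illustrative, not the script's
exact invocation):

import java.io.IOException;

public class ApplyWithFallback {
  static int run(String... cmd) throws IOException, InterruptedException {
    // Run a command, inheriting stdout/stderr, and return its exit code.
    return new ProcessBuilder(cmd).inheritIO().start().waitFor();
  }

  public static void main(String[] args) throws Exception {
    String patchFile = args[0];
    if (run("git", "apply", patchFile) == 0) {
      return; // git applied the patch cleanly; nothing more to do
    }
    // git is not infallible: fall back to the plain patch tool.
    System.err.println("git apply failed. Falling back to patch.");
    System.exit(run("patch", "-p0", "-i", patchFile));
  }
}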

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e72a346e/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 3bca0bc..942d9e9 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -820,6 +820,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12019. update BUILDING.txt to include python for 'mvn site'
     in windows (vinayakumarb)
 
+    HADOOP-11994. smart-apply-patch wrongly assumes that git is infallible.
+    (Kengo Seki via Arpit Agarwal)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES


[47/50] hadoop git commit: HDFS-8554. TestDatanodeLayoutUpgrade fails on Windows. Contributed by Chris Nauroth.

Posted by zj...@apache.org.
HDFS-8554. TestDatanodeLayoutUpgrade fails on Windows. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0a3c1478
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0a3c1478
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0a3c1478

Branch: refs/heads/YARN-2928
Commit: 0a3c14782b2feb3595d02debdfa1598df748bc60
Parents: 77e5bae
Author: cnauroth <cn...@apache.org>
Authored: Mon Jun 8 08:39:02 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:57:02 2015 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/fs/FileUtil.java         | 6 ++++++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                  | 2 ++
 .../org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java    | 8 ++++----
 3 files changed, 12 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a3c1478/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index 5fd89c4..9b9e213 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -731,6 +731,12 @@ public class FileUtil {
       }
     }
 
+    if (entry.isLink()) {
+      File src = new File(outputDir, entry.getLinkName());
+      HardLink.createHardLink(src, outputFile);
+      return;
+    }
+
     int count;
     byte data[] = new byte[2048];
     BufferedOutputStream outputStream = new BufferedOutputStream(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a3c1478/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 853a022..73574b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -864,6 +864,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-8539. Hdfs doesn't have class 'debug' in windows.
     (Anu Engineer via cnauroth)
 
+    HDFS-8554. TestDatanodeLayoutUpgrade fails on Windows. (cnauroth)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a3c1478/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java
index 343320c..224abea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java
@@ -37,11 +37,11 @@ public class TestDatanodeLayoutUpgrade {
     upgrade.unpackStorage(HADOOP24_DATANODE, HADOOP_DATANODE_DIR_TXT);
     Configuration conf = new Configuration(TestDFSUpgradeFromImage.upgradeConf);
     conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
-        System.getProperty("test.build.data") + File.separator +
-            "dfs" + File.separator + "data");
+        new File(System.getProperty("test.build.data"),
+            "dfs" + File.separator + "data").toURI().toString());
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
-        System.getProperty("test.build.data") + File.separator +
-            "dfs" + File.separator + "name");
+        new File(System.getProperty("test.build.data"),
+            "dfs" + File.separator + "name").toURI().toString());
     upgrade.upgradeAndVerify(new MiniDFSCluster.Builder(conf).numDataNodes(1)
     .manageDataDfsDirs(false).manageNameDfsDirs(false), null);
   }
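
The TestDatanodeLayoutUpgrade fix works because the data and name directory
configuration values are ultimately parsed as URIs, and a bare Windows path
such as C:\build\test can be misread, with the drive letter mistaken for a
URI scheme. Wrapping the path in File#toURI() sidesteps that. A small sketch
of the idea (illustrative only, not code from the patch):

import java.io.File;

public class PathToUriDemo {
  public static void main(String[] args) {
    // On Windows this might resolve to something like C:\build\dfs\data.
    File dataDir = new File(System.getProperty("test.build.data", "/tmp"),
        "dfs" + File.separator + "data");
    // toURI() yields a scheme-qualified form such as file:/C:/build/dfs/data
    // (or file:/tmp/dfs/data on Unix), which parses unambiguously.
    System.out.println(dataDir.toURI().toString());
  }
}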


[36/50] hadoop git commit: MAPREDUCE-6387. Serialize the recently added Task#encryptedSpillKey field at the end. (Arun Suresh via kasha)

Posted by zj...@apache.org.
MAPREDUCE-6387. Serialize the recently added Task#encryptedSpillKey field at the end. (Arun Suresh via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b7063f2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b7063f2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b7063f2

Branch: refs/heads/YARN-2928
Commit: 7b7063f2e8960053c41f12a39b2557953a2ddac3
Parents: a0962cd
Author: Karthik Kambatla <ka...@apache.org>
Authored: Fri Jun 5 09:14:06 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:56:59 2015 -0700

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt                             | 3 +++
 .../src/main/java/org/apache/hadoop/mapred/Task.java             | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b7063f2/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 12e3a3f..e7c02c0 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -520,6 +520,9 @@ Release 2.7.1 - UNRELEASED
     copySucceeded() in one thread and copyFailed() in another thread on the
     same host. (Junping Du via ozawa)
 
+    MAPREDUCE-6387. Serialize the recently added Task#encryptedSpillKey field at 
+    the end. (Arun Suresh via kasha)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b7063f2/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
index c07d517..673f183 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
@@ -513,8 +513,8 @@ abstract public class Task implements Writable, Configurable {
     out.writeBoolean(taskCleanup);
     Text.writeString(out, user);
     out.writeInt(encryptedSpillKey.length);
-    out.write(encryptedSpillKey);
     extraData.write(out);
+    out.write(encryptedSpillKey);
   }
   
   public void readFields(DataInput in) throws IOException {
@@ -541,8 +541,8 @@ abstract public class Task implements Writable, Configurable {
     user = StringInterner.weakIntern(Text.readString(in));
     int len = in.readInt();
     encryptedSpillKey = new byte[len];
-    in.readFully(encryptedSpillKey);
     extraData.readFields(in);
+    in.readFully(encryptedSpillKey);
   }
 
   @Override
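
Both write() and readFields() must move together because of the Writable
contract: fields carry no tags on the wire, so readFields has to consume the
stream in exactly the order write produced it. A minimal sketch of that
symmetry (a hypothetical class, not the Task code itself):

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;

public class OrderedRecord implements Writable {
  private String user = "";
  private byte[] key = new byte[0];

  @Override
  public void write(DataOutput out) throws IOException {
    Text.writeString(out, user); // field 1
    out.writeInt(key.length);    // field 2: length prefix
    out.write(key);              // field 3: raw bytes, last on the wire
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    user = Text.readString(in);  // field 1, same order as write()
    int len = in.readInt();      // field 2
    key = new byte[len];
    in.readFully(key);           // field 3; reordering only one side would
                                 // misinterpret every field that follows
  }
}

Moving encryptedSpillKey after extraData on both the write and read sides
keeps the wire format self-consistent while placing the newer field at the
end of the record.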


[34/50] hadoop git commit: YARN-3259. FairScheduler: Trigger fairShare updates on node events. (Anubhav Dhoot via kasha)

Posted by zj...@apache.org.
YARN-3259. FairScheduler: Trigger fairShare updates on node events. (Anubhav Dhoot via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f82a100d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f82a100d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f82a100d

Branch: refs/heads/YARN-2928
Commit: f82a100dae589af535c15eac97a5a4aaadede74a
Parents: 7b7063f
Author: Karthik Kambatla <ka...@apache.org>
Authored: Fri Jun 5 09:39:41 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:56:59 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |   3 +
 .../scheduler/fair/FSOpDurations.java           |   6 +
 .../scheduler/fair/FairScheduler.java           |  23 +++-
 .../scheduler/fair/TestSchedulingUpdate.java    | 135 +++++++++++++++++++
 4 files changed, 163 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f82a100d/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 69efca4..d5e8bba 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -397,6 +397,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3547. FairScheduler: Apps that have no resource demand should not
     participate in scheduling. (Xianyin Xin via kasha)
 
+    YARN-3259. FairScheduler: Trigger fairShare updates on node events. 
+    (Anubhav Dhoot via kasha)
+
   BUG FIXES
 
     YARN-3197. Confusing log generated by CapacityScheduler. (Varun Saxena 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f82a100d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSOpDurations.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSOpDurations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSOpDurations.java
index c2282fd..20d2af9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSOpDurations.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSOpDurations.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.metrics2.MetricsCollector;
@@ -116,4 +117,9 @@ public class FSOpDurations implements MetricsSource {
   public void addPreemptCallDuration(long value) {
     preemptCall.add(value);
   }
+
+  @VisibleForTesting
+  public boolean hasUpdateThreadRunChanged() {
+    return updateThreadRun.changed();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f82a100d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 07b3271..64b3f12 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -103,9 +103,9 @@ import com.google.common.base.Preconditions;
  * of the root queue in the typical fair scheduling fashion. Then, the children
  * distribute the resources assigned to them to their children in the same
  * fashion.  Applications may only be scheduled on leaf queues. Queues can be
- * specified as children of other queues by placing them as sub-elements of their
- * parents in the fair scheduler configuration file.
- * 
+ * specified as children of other queues by placing them as sub-elements of
+ * their parents in the fair scheduler configuration file.
+ *
  * A queue's name starts with the names of its parents, with periods as
  * separators.  So a queue named "queue1" under the root named, would be 
  * referred to as "root.queue1", and a queue named "queue2" under a queue
@@ -142,6 +142,8 @@ public class FairScheduler extends
   @VisibleForTesting
   Thread updateThread;
 
+  private final Object updateThreadMonitor = new Object();
+
   @VisibleForTesting
   Thread schedulingThread;
   // timeout to join when we stop this service
@@ -246,6 +248,13 @@ public class FairScheduler extends
     return queueMgr;
   }
 
+  // Allows the UpdateThread to start processing without waiting out the full updateInterval
+  void triggerUpdate() {
+    synchronized (updateThreadMonitor) {
+      updateThreadMonitor.notify();
+    }
+  }
+
   /**
    * Thread which calls {@link FairScheduler#update()} every
    * <code>updateInterval</code> milliseconds.
@@ -256,7 +265,9 @@ public class FairScheduler extends
     public void run() {
       while (!Thread.currentThread().isInterrupted()) {
         try {
-          Thread.sleep(updateInterval);
+          synchronized (updateThreadMonitor) {
+            updateThreadMonitor.wait(updateInterval);
+          }
           long start = getClock().getTime();
           update();
           preemptTasksIfNecessary();
@@ -838,6 +849,8 @@ public class FairScheduler extends
     updateRootQueueMetrics();
     updateMaximumAllocation(schedulerNode, true);
 
+    triggerUpdate();
+
     queueMgr.getRootQueue().setSteadyFairShare(clusterResource);
     queueMgr.getRootQueue().recomputeSteadyShares();
     LOG.info("Added node " + node.getNodeAddress() +
@@ -853,6 +866,8 @@ public class FairScheduler extends
     Resources.subtractFrom(clusterResource, rmNode.getTotalCapability());
     updateRootQueueMetrics();
 
+    triggerUpdate();
+
     // Remove running containers
     List<RMContainer> runningContainers = node.getRunningContainers();
     for (RMContainer container : runningContainers) {
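
The pattern above replaces a fixed Thread.sleep() with Object.wait() on a
private monitor, so node events can cut the wait short via notify() while the
periodic behavior is preserved. A self-contained sketch of the same pattern
(the names are illustrative, not the FairScheduler code):

public class PeriodicUpdater implements Runnable {
  private final Object monitor = new Object();
  private final long intervalMs;

  public PeriodicUpdater(long intervalMs) {
    this.intervalMs = intervalMs;
  }

  // Called from event handlers (e.g. node added or removed) to run an update
  // immediately instead of waiting out the rest of the interval.
  public void triggerUpdate() {
    synchronized (monitor) {
      monitor.notify();
    }
  }

  @Override
  public void run() {
    while (!Thread.currentThread().isInterrupted()) {
      try {
        synchronized (monitor) {
          monitor.wait(intervalMs); // wakes early if triggerUpdate() fires
        }
        update(); // recompute fair shares
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore status; loop will exit
      }
    }
  }

  private void update() {
    // placeholder for the actual fair-share recomputation
  }
}

Using a dedicated monitor object rather than notifying on the thread itself
keeps the wakeup channel private, so unrelated code cannot accidentally
trigger or swallow a notification.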

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f82a100d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestSchedulingUpdate.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestSchedulingUpdate.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestSchedulingUpdate.java
new file mode 100644
index 0000000..94298f4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestSchedulingUpdate.java
@@ -0,0 +1,135 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.AbstractMetric;
+import org.apache.hadoop.metrics2.MetricsRecord;
+import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent;
+import org.apache.hadoop.yarn.util.resource.Resources;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class TestSchedulingUpdate extends FairSchedulerTestBase {
+
+  @Override
+  public Configuration createConfiguration() {
+    Configuration conf = super.createConfiguration();
+
+    // Set the update interval so large that the update thread never runs on its own
+    conf.setInt(
+        FairSchedulerConfiguration.UPDATE_INTERVAL_MS,
+        Integer.MAX_VALUE);
+    return conf;
+  }
+
+  @Before
+  public void setup() {
+    conf = createConfiguration();
+    resourceManager = new MockRM(conf);
+    resourceManager.start();
+
+    scheduler = (FairScheduler) resourceManager.getResourceScheduler();
+  }
+
+  @After
+  public void teardown() {
+    if (resourceManager != null) {
+      resourceManager.stop();
+      resourceManager = null;
+    }
+  }
+
+  @Test (timeout = 3000)
+  public void testSchedulingUpdateOnNodeJoinLeave() throws InterruptedException {
+
+    verifyNoCalls();
+
+    // Add one node
+    String host = "127.0.0.1";
+    final int memory = 4096;
+    final int cores = 4;
+    RMNode node1 = MockNodes.newNodeInfo(
+        1, Resources.createResource(memory, cores), 1, host);
+    NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
+    scheduler.handle(nodeEvent1);
+
+    long expectedCalls = 1;
+    verifyExpectedCalls(expectedCalls, memory, cores);
+
+    // Remove the node
+    NodeRemovedSchedulerEvent nodeEvent2 = new NodeRemovedSchedulerEvent(node1);
+    scheduler.handle(nodeEvent2);
+
+    expectedCalls = 2;
+    verifyExpectedCalls(expectedCalls, 0, 0);
+  }
+
+  private void verifyExpectedCalls(long expectedCalls, int memory, int vcores)
+    throws InterruptedException {
+    boolean verified = false;
+    int count = 0;
+    while (count < 100) {
+      if (scheduler.fsOpDurations.hasUpdateThreadRunChanged()) {
+        break;
+      }
+      count++;
+      Thread.sleep(10);
+    }
+    assertTrue("Update Thread has not run based on its metrics",
+        scheduler.fsOpDurations.hasUpdateThreadRunChanged());
+    assertEquals("Root queue metrics memory does not have expected value",
+        memory, scheduler.getRootQueueMetrics().getAvailableMB());
+    assertEquals("Root queue metrics cpu does not have expected value",
+        vcores, scheduler.getRootQueueMetrics().getAvailableVirtualCores());
+
+    MetricsCollectorImpl collector = new MetricsCollectorImpl();
+    scheduler.fsOpDurations.getMetrics(collector, true);
+    MetricsRecord record = collector.getRecords().get(0);
+    for (AbstractMetric abstractMetric : record.metrics()) {
+      if (abstractMetric.name().contains("UpdateThreadRunNumOps")) {
+        assertEquals("Update Thread did not run expected number of times " +
+                "based on metric record count",
+            expectedCalls,
+            abstractMetric.value());
+        verified = true;
+      }
+    }
+    assertTrue("Did not find metric for UpdateThreadRunNumOps", verified);
+  }
+
+  private void verifyNoCalls() {
+    assertFalse("Update thread should not have executed",
+        scheduler.fsOpDurations.hasUpdateThreadRunChanged());
+    assertEquals("Scheduler queue memory should not have been updated",
+        0, scheduler.getRootQueueMetrics().getAvailableMB());
+    assertEquals("Scheduler queue cpu should not have been updated",
+        0, scheduler.getRootQueueMetrics().getAvailableVirtualCores());
+  }
+}
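
verifyExpectedCalls above polls by hand with a count/sleep loop. A sketch of
the same wait written with org.apache.hadoop.test.GenericTestUtils.waitFor
from hadoop-common's test jar, assuming the same 10 ms poll and an
illustrative 1 s cap:

    import com.google.common.base.Supplier;
    import org.apache.hadoop.test.GenericTestUtils;

    // Poll every 10 ms, fail with TimeoutException after 1000 ms, replacing
    // the manual count/sleep loop in verifyExpectedCalls.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return scheduler.fsOpDurations.hasUpdateThreadRunChanged();
      }
    }, 10, 1000);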


[07/50] hadoop git commit: YARN-3585. NodeManager cannot exit on SHUTDOWN event triggered and NM recovery is enabled. Contributed by Rohith Sharmaks

Posted by zj...@apache.org.
YARN-3585. NodeManager cannot exit on SHUTDOWN event triggered and NM recovery is enabled. Contributed by Rohith Sharmaks


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6e1fd03
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6e1fd03
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6e1fd03

Branch: refs/heads/YARN-2928
Commit: d6e1fd0362fff1431fdc3a1116e80ad7a60bde46
Parents: 91a3b9f
Author: Jason Lowe <jl...@apache.org>
Authored: Wed Jun 3 19:44:07 2015 +0000
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:43:13 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                    |  3 +++
 .../yarn/server/nodemanager/NodeManager.java       | 17 +++++++++++++++--
 2 files changed, 18 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6e1fd03/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 21618c7..1841d80 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -706,6 +706,9 @@ Release 2.7.1 - UNRELEASED
     YARN-3725. App submission via REST API is broken in secure mode due to 
     Timeline DT service address is empty. (Zhijie Shen via wangda)
 
+    YARN-3585. NodeManager cannot exit on SHUTDOWN event triggered and NM
+    recovery is enabled (Rohith Sharmaks via jlowe)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6e1fd03/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
index 9f34317..2f3d361 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.service.CompositeService;
+import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.NodeHealthScriptRunner;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -97,6 +98,7 @@ public class NodeManager extends CompositeService
   
   private AtomicBoolean isStopping = new AtomicBoolean(false);
   private boolean rmWorkPreservingRestartEnabled;
+  private boolean shouldExitOnShutdownEvent = false;
 
   public NodeManager() {
     super(NodeManager.class.getName());
@@ -354,7 +356,16 @@ public class NodeManager extends CompositeService
     new Thread() {
       @Override
       public void run() {
-        NodeManager.this.stop();
+        try {
+          NodeManager.this.stop();
+        } catch (Throwable t) {
+          LOG.error("Error while shutting down NodeManager", t);
+        } finally {
+          if (shouldExitOnShutdownEvent
+              && !ShutdownHookManager.get().isShutdownInProgress()) {
+            ExitUtil.terminate(-1);
+          }
+        }
       }
     }.start();
   }
@@ -563,7 +574,9 @@ public class NodeManager extends CompositeService
       nodeManagerShutdownHook = new CompositeServiceShutdownHook(this);
       ShutdownHookManager.get().addShutdownHook(nodeManagerShutdownHook,
                                                 SHUTDOWN_HOOK_PRIORITY);
-
+      // System exit should be called only when NodeManager is instantiated
+      // from the main() function
+      this.shouldExitOnShutdownEvent = true;
       this.init(conf);
       this.start();
     } catch (Throwable t) {


[17/50] hadoop git commit: MAPREDUCE-6174. Combine common stream code into parent class for InMemoryMapOutput and OnDiskMapOutput. (Eric Payne via gera)

Posted by zj...@apache.org.
MAPREDUCE-6174. Combine common stream code into parent class for InMemoryMapOutput and OnDiskMapOutput. (Eric Payne via gera)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e585863
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e585863
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e585863

Branch: refs/heads/YARN-2928
Commit: 2e58586316f548a8dd2effbc15d0729d1a622fe3
Parents: 2bff83c
Author: Gera Shegalov <ge...@apache.org>
Authored: Wed Jun 3 16:26:45 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:43:15 2015 -0700

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt            |  3 +
 .../task/reduce/IFileWrappedMapOutput.java      | 72 ++++++++++++++++++++
 .../task/reduce/InMemoryMapOutput.java          | 26 ++-----
 .../mapreduce/task/reduce/MergeManagerImpl.java |  5 +-
 .../mapreduce/task/reduce/OnDiskMapOutput.java  | 33 +++++----
 .../mapreduce/task/reduce/TestFetcher.java      | 27 ++++----
 6 files changed, 114 insertions(+), 52 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e585863/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index ba94324..5cc08a3 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -358,6 +358,9 @@ Release 2.8.0 - UNRELEASED
     MAPREDUCE-5248. Let NNBenchWithoutMR specify the replication factor for
     its test (Erik Paulson via jlowe)
 
+    MAPREDUCE-6174. Combine common stream code into parent class for
+    InMemoryMapOutput and OnDiskMapOutput. (Eric Payne via gera)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e585863/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/IFileWrappedMapOutput.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/IFileWrappedMapOutput.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/IFileWrappedMapOutput.java
new file mode 100644
index 0000000..119db15
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/IFileWrappedMapOutput.java
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce.task.reduce;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapred.IFileInputStream;
+import org.apache.hadoop.mapred.Reporter;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
+
+/**
+ * Common code for allowing MapOutput classes to handle streams.
+ *
+ * @param <K> key type for map output
+ * @param <V> value type for map output
+ */
+public abstract class IFileWrappedMapOutput<K, V> extends MapOutput<K, V> {
+  private final Configuration conf;
+  private final MergeManagerImpl<K, V> merger;
+
+  public IFileWrappedMapOutput(
+      Configuration c, MergeManagerImpl<K, V> m, TaskAttemptID mapId,
+      long size, boolean primaryMapOutput) {
+    super(mapId, size, primaryMapOutput);
+    conf = c;
+    merger = m;
+  }
+
+  /**
+   * @return the merger
+   */
+  protected MergeManagerImpl<K, V> getMerger() {
+    return merger;
+  }
+
+  protected abstract void doShuffle(
+      MapHost host, IFileInputStream iFileInputStream,
+      long compressedLength, long decompressedLength,
+      ShuffleClientMetrics metrics, Reporter reporter) throws IOException;
+
+  @Override
+  public void shuffle(MapHost host, InputStream input,
+                      long compressedLength, long decompressedLength,
+                      ShuffleClientMetrics metrics,
+                      Reporter reporter) throws IOException {
+    IFileInputStream iFin =
+        new IFileInputStream(input, compressedLength, conf);
+    try {
+      this.doShuffle(host, iFin, compressedLength,
+                    decompressedLength, metrics, reporter);
+    } finally {
+      iFin.close();
+    }
+  }
+}
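
IFileWrappedMapOutput is a template method: shuffle() owns the
IFileInputStream lifecycle (wrap, delegate, close in a finally block) and
subclasses supply only doShuffle(). A sketch of what a hypothetical third
implementation would need to provide; DiscardingMapOutput is an illustrative
name, not part of this patch:

    // Illustrative subclass. Only the byte transfer is written here: the
    // base class has already wrapped the socket stream in an
    // IFileInputStream and closes it afterwards.
    class DiscardingMapOutput<K, V> extends IFileWrappedMapOutput<K, V> {
      DiscardingMapOutput(Configuration conf, MergeManagerImpl<K, V> merger,
          TaskAttemptID mapId, long size, boolean primary) {
        super(conf, merger, mapId, size, primary);
      }

      @Override
      protected void doShuffle(MapHost host, IFileInputStream in,
          long compressedLength, long decompressedLength,
          ShuffleClientMetrics metrics, Reporter reporter) throws IOException {
        byte[] buf = new byte[64 * 1024];
        long left = compressedLength;
        while (left > 0) {
          int n = in.readWithChecksum(buf, 0, (int) Math.min(left, buf.length));
          if (n < 0) {
            throw new IOException("read past end of stream for " + getMapId());
          }
          left -= n;
        }
      }

      @Override
      public void commit() { /* nothing to publish to the merger */ }

      @Override
      public void abort() { /* nothing to release */ }

      @Override
      public String getDescription() { return "DISCARD"; }
    }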

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e585863/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/InMemoryMapOutput.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/InMemoryMapOutput.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/InMemoryMapOutput.java
index 24fb3bb..9b61ad5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/InMemoryMapOutput.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/InMemoryMapOutput.java
@@ -42,10 +42,8 @@ import org.apache.hadoop.mapreduce.TaskAttemptID;
 
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
-class InMemoryMapOutput<K, V> extends MapOutput<K, V> {
+class InMemoryMapOutput<K, V> extends IFileWrappedMapOutput<K, V> {
   private static final Log LOG = LogFactory.getLog(InMemoryMapOutput.class);
-  private Configuration conf;
-  private final MergeManagerImpl<K, V> merger;
   private final byte[] memory;
   private BoundedByteArrayOutputStream byteStream;
   // Decompression of map-outputs
@@ -56,9 +54,7 @@ class InMemoryMapOutput<K, V> extends MapOutput<K, V> {
                            MergeManagerImpl<K, V> merger,
                            int size, CompressionCodec codec,
                            boolean primaryMapOutput) {
-    super(mapId, (long)size, primaryMapOutput);
-    this.conf = conf;
-    this.merger = merger;
+    super(conf, merger, mapId, (long)size, primaryMapOutput);
     this.codec = codec;
     byteStream = new BoundedByteArrayOutputStream(size);
     memory = byteStream.getBuffer();
@@ -78,15 +74,12 @@ class InMemoryMapOutput<K, V> extends MapOutput<K, V> {
   }
 
   @Override
-  public void shuffle(MapHost host, InputStream input,
+  protected void doShuffle(MapHost host, IFileInputStream iFin,
                       long compressedLength, long decompressedLength,
                       ShuffleClientMetrics metrics,
                       Reporter reporter) throws IOException {
-    IFileInputStream checksumIn = 
-      new IFileInputStream(input, compressedLength, conf);
+    InputStream input = iFin;
 
-    input = checksumIn;       
-  
     // Are map-outputs compressed?
     if (codec != null) {
       decompressor.reset();
@@ -111,13 +104,6 @@ class InMemoryMapOutput<K, V> extends MapOutput<K, V> {
         throw new IOException("Unexpected extra bytes from input stream for " +
                                getMapId());
       }
-
-    } catch (IOException ioe) {      
-      // Close the streams
-      IOUtils.cleanup(LOG, input);
-
-      // Re-throw
-      throw ioe;
     } finally {
       CodecPool.returnDecompressor(decompressor);
     }
@@ -125,12 +111,12 @@ class InMemoryMapOutput<K, V> extends MapOutput<K, V> {
 
   @Override
   public void commit() throws IOException {
-    merger.closeInMemoryFile(this);
+    getMerger().closeInMemoryFile(this);
   }
   
   @Override
   public void abort() {
-    merger.unreserve(memory.length);
+    getMerger().unreserve(memory.length);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e585863/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
index f788707..c99a330 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
@@ -263,8 +263,9 @@ public class MergeManagerImpl<K, V> implements MergeManager<K, V> {
       LOG.info(mapId + ": Shuffling to disk since " + requestedSize + 
                " is greater than maxSingleShuffleLimit (" + 
                maxSingleShuffleLimit + ")");
-      return new OnDiskMapOutput<K,V>(mapId, reduceId, this, requestedSize,
-                                      jobConf, mapOutputFile, fetcher, true);
+      return new OnDiskMapOutput<K,V>(mapId, this, requestedSize, jobConf,
+         fetcher, true, FileSystem.getLocal(jobConf).getRaw(),
+         mapOutputFile.getInputFileForWrite(mapId.getTaskID(), requestedSize));
     }
     
     // Stall shuffle if we are above the memory limit

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e585863/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/OnDiskMapOutput.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/OnDiskMapOutput.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/OnDiskMapOutput.java
index 8275fd0..f22169d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/OnDiskMapOutput.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/OnDiskMapOutput.java
@@ -18,13 +18,11 @@
 package org.apache.hadoop.mapreduce.task.reduce;
 
 import java.io.IOException;
-import java.io.InputStream;
 import java.io.OutputStream;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
@@ -46,41 +44,46 @@ import com.google.common.annotations.VisibleForTesting;
 
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
-class OnDiskMapOutput<K, V> extends MapOutput<K, V> {
+class OnDiskMapOutput<K, V> extends IFileWrappedMapOutput<K, V> {
   private static final Log LOG = LogFactory.getLog(OnDiskMapOutput.class);
   private final FileSystem fs;
   private final Path tmpOutputPath;
   private final Path outputPath;
-  private final MergeManagerImpl<K, V> merger;
   private final OutputStream disk; 
   private long compressedSize;
-  private final Configuration conf;
 
+  @Deprecated
   public OnDiskMapOutput(TaskAttemptID mapId, TaskAttemptID reduceId,
                          MergeManagerImpl<K,V> merger, long size,
                          JobConf conf,
                          MapOutputFile mapOutputFile,
                          int fetcher, boolean primaryMapOutput)
       throws IOException {
-    this(mapId, reduceId, merger, size, conf, mapOutputFile, fetcher,
+    this(mapId, merger, size, conf, fetcher,
         primaryMapOutput, FileSystem.getLocal(conf).getRaw(),
         mapOutputFile.getInputFileForWrite(mapId.getTaskID(), size));
   }
 
-  @VisibleForTesting
+  @Deprecated
   OnDiskMapOutput(TaskAttemptID mapId, TaskAttemptID reduceId,
                          MergeManagerImpl<K,V> merger, long size,
                          JobConf conf,
                          MapOutputFile mapOutputFile,
                          int fetcher, boolean primaryMapOutput,
                          FileSystem fs, Path outputPath) throws IOException {
-    super(mapId, size, primaryMapOutput);
+    this(mapId, merger, size, conf, fetcher, primaryMapOutput, fs, outputPath);
+  }
+
+  OnDiskMapOutput(TaskAttemptID mapId,
+                  MergeManagerImpl<K, V> merger, long size,
+                  JobConf conf,
+                  int fetcher, boolean primaryMapOutput,
+                  FileSystem fs, Path outputPath) throws IOException {
+    super(conf, merger, mapId, size, primaryMapOutput);
     this.fs = fs;
-    this.merger = merger;
     this.outputPath = outputPath;
     tmpOutputPath = getTempPath(outputPath, fetcher);
     disk = CryptoUtils.wrapIfNecessary(conf, fs.create(tmpOutputPath));
-    this.conf = conf;
   }
 
   @VisibleForTesting
@@ -89,18 +92,18 @@ class OnDiskMapOutput<K, V> extends MapOutput<K, V> {
   }
 
   @Override
-  public void shuffle(MapHost host, InputStream input,
+  protected void doShuffle(MapHost host, IFileInputStream input,
                       long compressedLength, long decompressedLength,
                       ShuffleClientMetrics metrics,
                       Reporter reporter) throws IOException {
-    input = new IFileInputStream(input, compressedLength, conf);
     // Copy data to local-disk
     long bytesLeft = compressedLength;
     try {
       final int BYTES_TO_READ = 64 * 1024;
       byte[] buf = new byte[BYTES_TO_READ];
       while (bytesLeft > 0) {
-        int n = ((IFileInputStream)input).readWithChecksum(buf, 0, (int) Math.min(bytesLeft, BYTES_TO_READ));
+        int n = input.readWithChecksum(buf, 0,
+                                      (int) Math.min(bytesLeft, BYTES_TO_READ));
         if (n < 0) {
           throw new IOException("read past end of stream reading " + 
                                 getMapId());
@@ -117,7 +120,7 @@ class OnDiskMapOutput<K, V> extends MapOutput<K, V> {
       disk.close();
     } catch (IOException ioe) {
       // Close the streams
-      IOUtils.cleanup(LOG, input, disk);
+      IOUtils.cleanup(LOG, disk);
 
       // Re-throw
       throw ioe;
@@ -139,7 +142,7 @@ class OnDiskMapOutput<K, V> extends MapOutput<K, V> {
     fs.rename(tmpOutputPath, outputPath);
     CompressAwarePath compressAwarePath = new CompressAwarePath(outputPath,
         getSize(), this.compressedSize);
-    merger.closeOnDiskFile(compressAwarePath);
+    getMerger().closeOnDiskFile(compressAwarePath);
   }
   
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e585863/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestFetcher.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestFetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestFetcher.java
index a9cd33e..7888007 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestFetcher.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestFetcher.java
@@ -19,9 +19,7 @@
 package org.apache.hadoop.mapreduce.task.reduce;
 
 import java.io.FilterInputStream;
-
 import java.lang.Void;
-
 import java.net.HttpURLConnection;
 
 import org.apache.hadoop.fs.ChecksumException;
@@ -30,13 +28,12 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapred.MapOutputFile;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.TaskID;
-
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.rules.TestName;
-import static org.junit.Assert.*;
 
+import static org.junit.Assert.*;
 import static org.mockito.Matchers.*;
 import static org.mockito.Mockito.*;
 
@@ -65,10 +62,11 @@ import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.Time;
 import org.junit.Test;
-
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
+import com.nimbusds.jose.util.StringUtils;
+
 /**
  * Test that the Fetcher does what we expect it to.
  */
@@ -453,9 +451,9 @@ public class TestFetcher {
     ByteArrayInputStream in = new ByteArrayInputStream(bout.toByteArray());
     when(connection.getInputStream()).thenReturn(in);
     // 8 < 10 therefore there appear to be extra bytes in the IFileInputStream
-    InMemoryMapOutput<Text,Text> mapOut = new InMemoryMapOutput<Text, Text>(
+    IFileWrappedMapOutput<Text,Text> mapOut = new InMemoryMapOutput<Text, Text>(
         job, map1ID, mm, 8, null, true );
-    InMemoryMapOutput<Text,Text> mapOut2 = new InMemoryMapOutput<Text, Text>(
+    IFileWrappedMapOutput<Text,Text> mapOut2 = new InMemoryMapOutput<Text, Text>(
         job, map2ID, mm, 10, null, true );
 
     when(mm.reserve(eq(map1ID), anyLong(), anyInt())).thenReturn(mapOut);
@@ -478,9 +476,9 @@ public class TestFetcher {
     Path shuffledToDisk =
         OnDiskMapOutput.getTempPath(onDiskMapOutputPath, fetcher);
     fs = FileSystem.getLocal(job).getRaw();
-    MapOutputFile mof = mock(MapOutputFile.class);
-    OnDiskMapOutput<Text,Text> odmo = new OnDiskMapOutput<Text,Text>(map1ID,
-        id, mm, 100L, job, mof, fetcher, true, fs, onDiskMapOutputPath);
+    IFileWrappedMapOutput<Text,Text> odmo =
+        new OnDiskMapOutput<Text,Text>(map1ID, mm, 100L, job, fetcher, true,
+                                       fs, onDiskMapOutputPath);
 
     String mapData = "MAPDATA12345678901234567890";
 
@@ -538,7 +536,7 @@ public class TestFetcher {
   @Test(timeout=10000)
   public void testInterruptInMemory() throws Exception {
     final int FETCHER = 2;
-    InMemoryMapOutput<Text,Text> immo = spy(new InMemoryMapOutput<Text,Text>(
+    IFileWrappedMapOutput<Text,Text> immo = spy(new InMemoryMapOutput<Text,Text>(
           job, id, mm, 100, null, true));
     when(mm.reserve(any(TaskAttemptID.class), anyLong(), anyInt()))
         .thenReturn(immo);
@@ -584,10 +582,9 @@ public class TestFetcher {
     Path p = new Path("file:///tmp/foo");
     Path pTmp = OnDiskMapOutput.getTempPath(p, FETCHER);
     FileSystem mFs = mock(FileSystem.class, RETURNS_DEEP_STUBS);
-    MapOutputFile mof = mock(MapOutputFile.class);
-    when(mof.getInputFileForWrite(any(TaskID.class), anyLong())).thenReturn(p);
-    OnDiskMapOutput<Text,Text> odmo = spy(new OnDiskMapOutput<Text,Text>(map1ID,
-        id, mm, 100L, job, mof, FETCHER, true, mFs, p));
+    IFileWrappedMapOutput<Text,Text> odmo =
+        spy(new OnDiskMapOutput<Text,Text>(map1ID, mm, 100L, job,
+                                           FETCHER, true, mFs, p));
     when(mm.reserve(any(TaskAttemptID.class), anyLong(), anyInt()))
         .thenReturn(odmo);
     doNothing().when(mm).waitForResource();


[49/50] hadoop git commit: HADOOP-12052 IPC client downgrades all exception types to IOE, breaks callers trying to use them. (Brahma Reddy Battula via stevel)

Posted by zj...@apache.org.
HADOOP-12052 IPC client downgrades all exception types to IOE, breaks callers trying to use them. (Brahma Reddy Battula via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/77e5bae7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/77e5bae7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/77e5bae7

Branch: refs/heads/YARN-2928
Commit: 77e5bae7a19c4aeaf1adbb7034d488f4299f0447
Parents: ddf75e3
Author: Steve Loughran <st...@apache.org>
Authored: Mon Jun 8 13:02:26 2015 +0100
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:57:02 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt              | 3 +++
 .../src/main/java/org/apache/hadoop/ipc/Client.java          | 8 +++++++-
 2 files changed, 10 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/77e5bae7/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index eacc3be..79f3178 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -834,6 +834,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-11924. Tolerate JDK-8047340-related exceptions in
     Shell#isSetSidAvailable preventing class init. (Tsuyoshi Ozawa via gera)
 
+    HADOOP-12052 IPC client downgrades all exception types to IOE, breaks
+    callers trying to use them. (Brahma Reddy Battula via stevel)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77e5bae7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index feb811e..6996a51 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -1484,7 +1484,13 @@ public class Client {
           }
         });
       } catch (ExecutionException e) {
-        throw new IOException(e);
+        Throwable cause = e.getCause();
+        // the underlying exception should normally be IOException
+        if (cause instanceof IOException) {
+          throw (IOException) cause;
+        } else {
+          throw new IOException(cause);
+        }
       }
       if (connection.addCall(call)) {
         break;
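
The fix is the standard ExecutionException unwrap: preserve the cause's type
when it is already an IOException, so callers can still catch specific
subtypes (the "callers trying to use them" from the JIRA title). A
self-contained sketch of the idiom, with illustrative class and message names:

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    public class UnwrapDemo {
      public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        Future<Void> task = pool.submit(new Callable<Void>() {
          @Override
          public Void call() throws IOException {
            throw new FileNotFoundException("thrown inside the task");
          }
        });
        try {
          task.get();
        } catch (ExecutionException e) {
          Throwable cause = e.getCause();
          if (cause instanceof IOException) {
            // The caller sees FileNotFoundException, not a generic
            // IOException wrapping it; this is what the Client.java
            // change restores.
            throw (IOException) cause;
          }
          throw new IOException(cause);
        } finally {
          pool.shutdown();
        }
      }
    }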


[21/50] hadoop git commit: YARN-41. The RM should handle the graceful shutdown of the NM. Contributed by Devaraj K.

Posted by zj...@apache.org.
YARN-41. The RM should handle the graceful shutdown of the NM. Contributed by Devaraj K.

Conflicts:
	hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/868b9ce8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/868b9ce8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/868b9ce8

Branch: refs/heads/YARN-2928
Commit: 868b9ce8ce5bdab49b6b01b6491e9778a202ed8e
Parents: 8732f97
Author: Junping Du <ju...@apache.org>
Authored: Thu Jun 4 04:59:27 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:50:09 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |   3 +
 .../hadoop/yarn/api/records/NodeState.java      |  10 +-
 .../src/main/proto/yarn_protos.proto            |   1 +
 .../hadoop/yarn/server/api/ResourceTracker.java |  16 ++-
 .../pb/client/ResourceTrackerPBClientImpl.java  |  18 +++
 .../service/ResourceTrackerPBServiceImpl.java   |  27 +++-
 .../UnRegisterNodeManagerRequest.java           |  38 ++++++
 .../UnRegisterNodeManagerResponse.java          |  30 +++++
 .../pb/UnRegisterNodeManagerRequestPBImpl.java  | 108 ++++++++++++++++
 .../pb/UnRegisterNodeManagerResponsePBImpl.java |  70 +++++++++++
 .../src/main/proto/ResourceTracker.proto        |   1 +
 .../yarn_server_common_service_protos.proto     |   7 ++
 .../yarn/TestResourceTrackerPBClientImpl.java   |  34 ++++-
 .../apache/hadoop/yarn/TestYSCRPCFactories.java |  10 +-
 .../hadoop/yarn/TestYarnServerApiClasses.java   |  12 ++
 .../nodemanager/NodeStatusUpdaterImpl.java      |  33 +++++
 .../server/nodemanager/LocalRMInterface.java    |  10 ++
 .../nodemanager/MockNodeStatusUpdater.java      |   9 ++
 .../nodemanager/TestNodeStatusUpdater.java      |  44 +++++++
 .../TestNodeStatusUpdaterForLabels.java         |   8 ++
 .../server/resourcemanager/ClusterMetrics.java  |  14 +++
 .../resourcemanager/ResourceTrackerService.java |  23 ++++
 .../resourcemanager/rmnode/RMNodeEventType.java |   1 +
 .../resourcemanager/rmnode/RMNodeImpl.java      |  20 +++
 .../webapp/MetricsOverviewTable.java            |   2 +
 .../resourcemanager/webapp/NodesPage.java       |   1 +
 .../webapp/dao/ClusterMetricsInfo.java          |   8 +-
 .../resourcemanager/TestRMNodeTransitions.java  |  14 +++
 .../TestResourceTrackerService.java             | 123 ++++++++++++++++++-
 .../resourcemanager/webapp/TestNodesPage.java   |   4 +-
 .../webapp/TestRMWebServices.java               |  21 ++--
 .../hadoop/yarn/server/MiniYARNCluster.java     |  10 ++
 32 files changed, 702 insertions(+), 28 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 61cc501..0c76206 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -217,6 +217,9 @@ Release 2.8.0 - UNRELEASED
     YARN-160. Enhanced NodeManager to automatically obtain cpu/memory values from
     underlying OS when configured to do so. (Varun Vasudev via vinodkv)
 
+    YARN-41. The RM should handle the graceful shutdown of the NM. (Devaraj K via 
+    junping_du)
+
   IMPROVEMENTS
 
     YARN-644. Basic null check is not performed on passed in arguments before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeState.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeState.java
index 741046c..d0344fb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeState.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeState.java
@@ -46,9 +46,13 @@ public enum NodeState {
   REBOOTED,
 
   /** Node decommission is in progress */
-  DECOMMISSIONING;
-  
+  DECOMMISSIONING,
+
+  /** Node has shutdown gracefully. */
+  SHUTDOWN;
+
   public boolean isUnusable() {
-    return (this == UNHEALTHY || this == DECOMMISSIONED || this == LOST);
+    return (this == UNHEALTHY || this == DECOMMISSIONED
+        || this == LOST || this == SHUTDOWN);
   }
 }
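
Adding SHUTDOWN to isUnusable() means every existing call site that filters
schedulable nodes picks the new state up for free. An illustrative
caller-side shape, not taken from this patch:

    // Allocation paths skip nodes in any terminal state, which now
    // includes SHUTDOWN alongside UNHEALTHY/DECOMMISSIONED/LOST.
    if (node.getState().isUnusable()) {
      continue;
    }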

http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index b9969b0..f801409 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -228,6 +228,7 @@ enum NodeStateProto {
   NS_LOST = 5;
   NS_REBOOTED = 6;
   NS_DECOMMISSIONING = 7;
+  NS_SHUTDOWN = 8;
 }
 
 message NodeIdProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ResourceTracker.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ResourceTracker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ResourceTracker.java
index ad8a625..c500130 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ResourceTracker.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ResourceTracker.java
@@ -26,16 +26,24 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerResponse;
 
+/**
+ * This is used by the Node Manager to register/nodeHeartbeat/unregister with
+ * the ResourceManager.
+ */
 public interface ResourceTracker {
   
   @Idempotent
-  public RegisterNodeManagerResponse registerNodeManager(
-      RegisterNodeManagerRequest request) throws YarnException,
-      IOException;
+  RegisterNodeManagerResponse registerNodeManager(
+      RegisterNodeManagerRequest request) throws YarnException, IOException;
 
   @AtMostOnce
-  public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
+  NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
       throws YarnException, IOException;
 
+  @Idempotent
+  UnRegisterNodeManagerResponse unRegisterNodeManager(
+      UnRegisterNodeManagerRequest request) throws YarnException, IOException;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceTrackerPBClientImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceTrackerPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceTrackerPBClientImpl.java
index 40f6874..9756aed 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceTrackerPBClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceTrackerPBClientImpl.java
@@ -29,16 +29,21 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.ipc.RPCUtil;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.UnRegisterNodeManagerRequestProto;
 import org.apache.hadoop.yarn.server.api.ResourceTracker;
 import org.apache.hadoop.yarn.server.api.ResourceTrackerPB;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodeHeartbeatRequestPBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodeHeartbeatResponsePBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RegisterNodeManagerRequestPBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RegisterNodeManagerResponsePBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UnRegisterNodeManagerRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UnRegisterNodeManagerResponsePBImpl;
 
 import com.google.protobuf.ServiceException;
 
@@ -84,4 +89,17 @@ private ResourceTrackerPB proxy;
     }
   }
 
+  @Override
+  public UnRegisterNodeManagerResponse unRegisterNodeManager(
+      UnRegisterNodeManagerRequest request) throws YarnException, IOException {
+    UnRegisterNodeManagerRequestProto requestProto =
+        ((UnRegisterNodeManagerRequestPBImpl) request).getProto();
+    try {
+      return new UnRegisterNodeManagerResponsePBImpl(
+          proxy.unRegisterNodeManager(null, requestProto));
+    } catch (ServiceException e) {
+      RPCUtil.unwrapAndThrowException(e);
+      return null;
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceTrackerPBServiceImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceTrackerPBServiceImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceTrackerPBServiceImpl.java
index 442e3c8..d79cf27 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceTrackerPBServiceImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceTrackerPBServiceImpl.java
@@ -25,14 +25,19 @@ import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatR
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatResponseProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.UnRegisterNodeManagerRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.UnRegisterNodeManagerResponseProto;
 import org.apache.hadoop.yarn.server.api.ResourceTracker;
 import org.apache.hadoop.yarn.server.api.ResourceTrackerPB;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodeHeartbeatRequestPBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodeHeartbeatResponsePBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RegisterNodeManagerRequestPBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RegisterNodeManagerResponsePBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UnRegisterNodeManagerRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UnRegisterNodeManagerResponsePBImpl;
 
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
@@ -53,9 +58,7 @@ public class ResourceTrackerPBServiceImpl implements ResourceTrackerPB {
     try {
       RegisterNodeManagerResponse response = real.registerNodeManager(request);
       return ((RegisterNodeManagerResponsePBImpl)response).getProto();
-    } catch (YarnException e) {
-      throw new ServiceException(e);
-    } catch (IOException e) {
+    } catch (YarnException | IOException e) {
       throw new ServiceException(e);
     }
   }
@@ -67,11 +70,23 @@ public class ResourceTrackerPBServiceImpl implements ResourceTrackerPB {
     try {
       NodeHeartbeatResponse response = real.nodeHeartbeat(request);
       return ((NodeHeartbeatResponsePBImpl)response).getProto();
-    } catch (YarnException e) {
-      throw new ServiceException(e);
-    } catch (IOException e) {
+    } catch (YarnException | IOException e) {
       throw new ServiceException(e);
     }
   }
 
+  @Override
+  public UnRegisterNodeManagerResponseProto unRegisterNodeManager(
+      RpcController controller, UnRegisterNodeManagerRequestProto proto)
+      throws ServiceException {
+    UnRegisterNodeManagerRequestPBImpl request =
+        new UnRegisterNodeManagerRequestPBImpl(proto);
+    try {
+      UnRegisterNodeManagerResponse response = real
+          .unRegisterNodeManager(request);
+      return ((UnRegisterNodeManagerResponsePBImpl) response).getProto();
+    } catch (YarnException | IOException e) {
+      throw new ServiceException(e);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/UnRegisterNodeManagerRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/UnRegisterNodeManagerRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/UnRegisterNodeManagerRequest.java
new file mode 100644
index 0000000..7287464
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/UnRegisterNodeManagerRequest.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.api.protocolrecords;
+
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * Node Manager's unregister request.
+ */
+public abstract class UnRegisterNodeManagerRequest {
+  public static UnRegisterNodeManagerRequest newInstance(NodeId nodeId) {
+    UnRegisterNodeManagerRequest unRegisterRequest = Records
+        .newRecord(UnRegisterNodeManagerRequest.class);
+    unRegisterRequest.setNodeId(nodeId);
+    return unRegisterRequest;
+  }
+
+  public abstract NodeId getNodeId();
+
+  public abstract void setNodeId(NodeId nodeId);
+}
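
On the NM side, the diffstat shows NodeStatusUpdaterImpl growing an
unregister step. A sketch of how the new record and RPC are meant to be used
together, assuming a resourceTracker proxy and nodeId field like the updater
already has; the names mirror the patch's shape, the body is illustrative:

    // Graceful unregister during NM shutdown. resourceTracker and nodeId
    // stand for the updater's existing RPC proxy and node identity.
    UnRegisterNodeManagerRequest request =
        UnRegisterNodeManagerRequest.newInstance(nodeId);
    try {
      resourceTracker.unRegisterNodeManager(request);
      LOG.info("Successfully unregistered " + nodeId + " with the RM");
    } catch (Exception e) {
      LOG.warn("Unregistering " + nodeId + " with the RM failed", e);
    }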

http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/UnRegisterNodeManagerResponse.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/UnRegisterNodeManagerResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/UnRegisterNodeManagerResponse.java
new file mode 100644
index 0000000..84002e2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/UnRegisterNodeManagerResponse.java
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.api.protocolrecords;
+
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * Node Manager's unregister response.
+ */
+public abstract class UnRegisterNodeManagerResponse {
+  public static UnRegisterNodeManagerResponse newInstance() {
+    return Records.newRecord(UnRegisterNodeManagerResponse.class);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UnRegisterNodeManagerRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UnRegisterNodeManagerRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UnRegisterNodeManagerRequestPBImpl.java
new file mode 100644
index 0000000..5f8196e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UnRegisterNodeManagerRequestPBImpl.java
@@ -0,0 +1,108 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.UnRegisterNodeManagerRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.UnRegisterNodeManagerRequestProtoOrBuilder;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerRequest;
+
+/**
+ * PBImpl class for UnRegisterNodeManagerRequest.
+ */
+public class UnRegisterNodeManagerRequestPBImpl extends
+    UnRegisterNodeManagerRequest {
+  private UnRegisterNodeManagerRequestProto proto =
+      UnRegisterNodeManagerRequestProto.getDefaultInstance();
+  private UnRegisterNodeManagerRequestProto.Builder builder = null;
+  private boolean viaProto = false;
+
+  private NodeId nodeId = null;
+
+  public UnRegisterNodeManagerRequestPBImpl() {
+    builder = UnRegisterNodeManagerRequestProto.newBuilder();
+  }
+
+  public UnRegisterNodeManagerRequestPBImpl(
+      UnRegisterNodeManagerRequestProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  public UnRegisterNodeManagerRequestProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  private void mergeLocalToBuilder() {
+    if (this.nodeId != null) {
+      builder.setNodeId(convertToProtoFormat(this.nodeId));
+    }
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto) {
+      maybeInitBuilder();
+    }
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = UnRegisterNodeManagerRequestProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  @Override
+  public NodeId getNodeId() {
+    UnRegisterNodeManagerRequestProtoOrBuilder p = viaProto ? proto : builder;
+    if (this.nodeId != null) {
+      return this.nodeId;
+    }
+    if (!p.hasNodeId()) {
+      return null;
+    }
+    this.nodeId = convertFromProtoFormat(p.getNodeId());
+    return this.nodeId;
+  }
+
+  @Override
+  public void setNodeId(NodeId updatedNodeId) {
+    maybeInitBuilder();
+    if (updatedNodeId == null) {
+      builder.clearNodeId();
+    }
+    this.nodeId = updatedNodeId;
+  }
+
+  private NodeIdPBImpl convertFromProtoFormat(NodeIdProto p) {
+    return new NodeIdPBImpl(p);
+  }
+
+  private NodeIdProto convertToProtoFormat(NodeId t) {
+    return ((NodeIdPBImpl) t).getProto();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UnRegisterNodeManagerResponsePBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UnRegisterNodeManagerResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UnRegisterNodeManagerResponsePBImpl.java
new file mode 100644
index 0000000..707b6d8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UnRegisterNodeManagerResponsePBImpl.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.UnRegisterNodeManagerResponseProto;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerResponse;
+
+/**
+ * PBImpl class for UnRegisterNodeManagerResponse.
+ */
+public class UnRegisterNodeManagerResponsePBImpl extends
+    UnRegisterNodeManagerResponse {
+  private UnRegisterNodeManagerResponseProto proto =
+      UnRegisterNodeManagerResponseProto.getDefaultInstance();
+  private UnRegisterNodeManagerResponseProto.Builder builder = null;
+  private boolean viaProto = false;
+
+  private boolean rebuild = false;
+
+  public UnRegisterNodeManagerResponsePBImpl() {
+    builder = UnRegisterNodeManagerResponseProto.newBuilder();
+  }
+
+  public UnRegisterNodeManagerResponsePBImpl(
+      UnRegisterNodeManagerResponseProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  public UnRegisterNodeManagerResponseProto getProto() {
+    if (rebuild) {
+      mergeLocalToProto();
+    }
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto) {
+      maybeInitBuilder();
+    }
+    proto = builder.build();
+    rebuild = false;
+    viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = UnRegisterNodeManagerResponseProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/ResourceTracker.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/ResourceTracker.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/ResourceTracker.proto
index 1f91b63..7487184 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/ResourceTracker.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/ResourceTracker.proto
@@ -27,4 +27,5 @@ import "yarn_server_common_service_protos.proto";
 service ResourceTrackerService {
   rpc registerNodeManager(RegisterNodeManagerRequestProto) returns (RegisterNodeManagerResponseProto);
   rpc nodeHeartbeat(NodeHeartbeatRequestProto) returns (NodeHeartbeatResponseProto);
+  rpc unRegisterNodeManager(UnRegisterNodeManagerRequestProto) returns (UnRegisterNodeManagerResponseProto);
 }

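The new rpc completes the NM lifecycle on the ResourceTracker protocol: register, heartbeat, and now unregister. A minimal client-side sketch, assuming an already-constructed ResourceTracker proxy named client, as the PB client test further below builds one:

    UnRegisterNodeManagerRequest request = UnRegisterNodeManagerRequest
        .newInstance(NodeId.newInstance("host1", 1234));
    UnRegisterNodeManagerResponse response =
        client.unRegisterNodeManager(request);
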
http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
index 94dd46a..22c4cf6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
@@ -49,6 +49,13 @@ message RegisterNodeManagerResponseProto {
   optional bool areNodeLabelsAcceptedByRM = 7 [default = false];
 }
 
+message UnRegisterNodeManagerRequestProto {
+  optional NodeIdProto node_id = 1;
+}
+
+message UnRegisterNodeManagerResponseProto {
+}
+
 message NodeHeartbeatRequestProto {
   optional NodeStatusProto node_status = 1;
   optional MasterKeyProto last_known_container_token_master_key = 2;

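On the wire the request carries only an optional node id, and the response is empty. A hedged sketch of building the request proto directly with the generated builder, assuming NodeIdProto is the existing YARN node identifier message with host and port fields:

    UnRegisterNodeManagerRequestProto proto =
        UnRegisterNodeManagerRequestProto.newBuilder()
            .setNodeId(NodeIdProto.newBuilder()
                .setHost("host1")
                .setPort(1234)
                .build())
            .build();
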
http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestResourceTrackerPBClientImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestResourceTrackerPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestResourceTrackerPBClientImpl.java
index bc89e66..3b5ef08 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestResourceTrackerPBClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestResourceTrackerPBClientImpl.java
@@ -23,6 +23,7 @@ import java.net.InetSocketAddress;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.factories.impl.pb.RpcClientFactoryPBImpl;
 import org.apache.hadoop.yarn.factories.impl.pb.RpcServerFactoryPBImpl;
@@ -32,9 +33,12 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerResponse;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
+
 import static org.junit.Assert.*;
 
 /**
@@ -116,7 +120,27 @@ public class TestResourceTrackerPBClientImpl {
 
   }
 
-  
+  /**
+   * Test the unRegisterNodeManager method. It should return a non-null
+   * result and rethrow a server-side YarnException to the client.
+   */
+  @Test
+  public void testUnRegisterNodeManager() throws Exception {
+    UnRegisterNodeManagerRequest request = UnRegisterNodeManagerRequest
+        .newInstance(NodeId.newInstance("host1", 1234));
+    assertNotNull(client.unRegisterNodeManager(request));
+
+    ResourceTrackerTestImpl.exception = true;
+    try {
+      client.unRegisterNodeManager(request);
+      fail("there  should be YarnException");
+    } catch (YarnException e) {
+      assertTrue(e.getMessage().startsWith("testMessage"));
+    } finally {
+      ResourceTrackerTestImpl.exception = false;
+    }
+  }
 
   public static class ResourceTrackerTestImpl implements ResourceTracker {
 
@@ -140,5 +164,13 @@ public class TestResourceTrackerPBClientImpl {
       return recordFactory.newRecordInstance(NodeHeartbeatResponse.class);
     }
 
+    @Override
+    public UnRegisterNodeManagerResponse unRegisterNodeManager(
+        UnRegisterNodeManagerRequest request) throws YarnException, IOException {
+      if (exception) {
+        throw new YarnException("testMessage");
+      }
+      return UnRegisterNodeManagerResponse.newInstance();
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYSCRPCFactories.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYSCRPCFactories.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYSCRPCFactories.java
index a0cc085..9906b46 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYSCRPCFactories.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYSCRPCFactories.java
@@ -35,6 +35,8 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerResponse;
 import org.junit.Test;
 
 public class TestYSCRPCFactories {
@@ -115,6 +117,12 @@ public class TestYSCRPCFactories {
       // TODO Auto-generated method stub
       return null;
     }
-    
+
+    @Override
+    public UnRegisterNodeManagerResponse unRegisterNodeManager(
+        UnRegisterNodeManagerRequest request) throws YarnException, IOException {
+      // TODO Auto-generated method stub
+      return null;
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java
index 779bfee..c75155b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodeHeartbeatRe
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodeHeartbeatResponsePBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RegisterNodeManagerRequestPBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RegisterNodeManagerResponsePBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UnRegisterNodeManagerRequestPBImpl;
 import org.apache.hadoop.yarn.server.api.records.MasterKey;
 import org.apache.hadoop.yarn.server.api.records.NodeAction;
 import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
@@ -311,6 +312,17 @@ public class TestYarnServerApiClasses {
     Assert.assertEquals(0, copy.getNodeLabels().size());
   }
 
+  @Test
+  public void testUnRegisterNodeManagerRequestPBImpl() throws Exception {
+    UnRegisterNodeManagerRequestPBImpl request = new UnRegisterNodeManagerRequestPBImpl();
+    NodeId nodeId = NodeId.newInstance("host", 1234);
+    request.setNodeId(nodeId);
+
+    UnRegisterNodeManagerRequestPBImpl copy = new UnRegisterNodeManagerRequestPBImpl(
+        request.getProto());
+    Assert.assertEquals(nodeId, copy.getNodeId());
+  }
+
   private HashSet<NodeLabel> getValidNodeLabels() {
     HashSet<NodeLabel> nodeLabels = new HashSet<NodeLabel>();
     nodeLabels.add(NodeLabel.newInstance("java"));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
index 7762bcf..e793432 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
@@ -57,6 +57,8 @@ import org.apache.hadoop.yarn.client.api.TimelineClient;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factories.impl.pb.RecordFactoryPBImpl;
 import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
 import org.apache.hadoop.yarn.server.api.ResourceManagerConstants;
 import org.apache.hadoop.yarn.server.api.ResourceTracker;
@@ -67,6 +69,7 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerRequest;
 import org.apache.hadoop.yarn.server.api.records.MasterKey;
 import org.apache.hadoop.yarn.server.api.records.NodeAction;
 import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
@@ -132,6 +135,7 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
   private Runnable statusUpdaterRunnable;
   private Thread  statusUpdater;
   private long rmIdentifier = ResourceManagerConstants.RM_INVALID_IDENTIFIER;
+  private boolean registeredWithRM = false;
   Set<ContainerId> pendingContainersToRemove = new HashSet<ContainerId>();
 
   private final NodeLabelsProvider nodeLabelsProvider;
@@ -234,12 +238,40 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
 
   @Override
   protected void serviceStop() throws Exception {
+    // The isStopped check avoids unregistering with the RM more than once.
+    if (this.registeredWithRM && !this.isStopped
+        && !isNMUnderSupervisionWithRecoveryEnabled()
+        && !context.getDecommissioned()) {
+      unRegisterNM();
+    }
     // Interrupt the updater.
     this.isStopped = true;
     stopRMProxy();
     super.serviceStop();
   }
 
+  private boolean isNMUnderSupervisionWithRecoveryEnabled() {
+    Configuration config = getConfig();
+    return config.getBoolean(YarnConfiguration.NM_RECOVERY_ENABLED,
+        YarnConfiguration.DEFAULT_NM_RECOVERY_ENABLED)
+        && config.getBoolean(YarnConfiguration.NM_RECOVERY_SUPERVISED,
+            YarnConfiguration.DEFAULT_NM_RECOVERY_SUPERVISED);
+  }
+
+  private void unRegisterNM() {
+    RecordFactory recordFactory = RecordFactoryPBImpl.get();
+    UnRegisterNodeManagerRequest request = recordFactory
+        .newRecordInstance(UnRegisterNodeManagerRequest.class);
+    request.setNodeId(this.nodeId);
+    try {
+      resourceTracker.unRegisterNodeManager(request);
+      LOG.info("Successfully Unregistered the Node " + this.nodeId
+          + " with ResourceManager.");
+    } catch (Exception e) {
+      LOG.warn("Unregistration of the Node " + this.nodeId + " failed.", e);
+    }
+  }
+
   protected void rebootNodeStatusUpdaterAndRegisterWithRM() {
     // Interrupt the updater.
     this.isStopped = true;
@@ -329,6 +361,7 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
             + "version error, " + message);
       }
     }
+    this.registeredWithRM = true;
     MasterKey masterKey = regNMResponse.getContainerTokenMasterKey();
     // do this now so that its set before we start heartbeating to RM
     // It is expected that status updater is started by this point and

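Unregistration on stop is deliberately conditional: it is skipped when the NM never registered, when it is already decommissioned, or when it runs supervised with recovery enabled, since a supervised restart should keep the RM-side node record alive. A hedged configuration sketch of the supervised case:

    // With both recovery flags set, isNMUnderSupervisionWithRecoveryEnabled()
    // returns true and serviceStop() does not call unRegisterNM(), so the RM
    // keeps the node record intact across the supervised restart.
    Configuration conf = new Configuration();
    conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
    conf.setBoolean(YarnConfiguration.NM_RECOVERY_SUPERVISED, true);
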
http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/LocalRMInterface.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/LocalRMInterface.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/LocalRMInterface.java
index 4b5f040..3843032 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/LocalRMInterface.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/LocalRMInterface.java
@@ -30,6 +30,8 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerResponse;
 import org.apache.hadoop.yarn.server.api.records.MasterKey;
 import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl;
 
@@ -57,4 +59,12 @@ public class LocalRMInterface implements ResourceTracker {
     NodeHeartbeatResponse response = recordFactory.newRecordInstance(NodeHeartbeatResponse.class);
     return response;
   }
+
+  @Override
+  public UnRegisterNodeManagerResponse unRegisterNodeManager(
+      UnRegisterNodeManagerRequest request) throws YarnException, IOException {
+    UnRegisterNodeManagerResponse response = recordFactory
+        .newRecordInstance(UnRegisterNodeManagerResponse.class);
+    return response;
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/MockNodeStatusUpdater.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/MockNodeStatusUpdater.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/MockNodeStatusUpdater.java
index 3f4091c..50487c8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/MockNodeStatusUpdater.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/MockNodeStatusUpdater.java
@@ -33,6 +33,8 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerResponse;
 import org.apache.hadoop.yarn.server.api.records.MasterKey;
 import org.apache.hadoop.yarn.server.api.records.NodeStatus;
 import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl;
@@ -100,5 +102,12 @@ public class MockNodeStatusUpdater extends NodeStatusUpdaterImpl {
               null, null, null, 1000L);
       return nhResponse;
     }
+
+    @Override
+    public UnRegisterNodeManagerResponse unRegisterNodeManager(
+        UnRegisterNodeManagerRequest request) throws YarnException, IOException {
+      return recordFactory
+          .newRecordInstance(UnRegisterNodeManagerResponse.class);
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
index 7c71b31..ebbd999 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
@@ -85,6 +85,8 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodeHeartbeatResponsePBImpl;
 import org.apache.hadoop.yarn.server.api.records.MasterKey;
 import org.apache.hadoop.yarn.server.api.records.NodeAction;
@@ -295,6 +297,13 @@ public class TestNodeStatusUpdater {
             1000L);
       return nhResponse;
     }
+
+    @Override
+    public UnRegisterNodeManagerResponse unRegisterNodeManager(
+        UnRegisterNodeManagerRequest request) throws YarnException, IOException {
+      return recordFactory
+          .newRecordInstance(UnRegisterNodeManagerResponse.class);
+    }
   }
 
   private class MyNodeStatusUpdater extends NodeStatusUpdaterImpl {
@@ -515,6 +524,13 @@ public class TestNodeStatusUpdater {
       nhResponse.setDiagnosticsMessage(shutDownMessage);
       return nhResponse;
     }
+
+    @Override
+    public UnRegisterNodeManagerResponse unRegisterNodeManager(
+        UnRegisterNodeManagerRequest request) throws YarnException, IOException {
+      return recordFactory
+          .newRecordInstance(UnRegisterNodeManagerResponse.class);
+    }
   }
 
   private class MyResourceTracker3 implements ResourceTracker {
@@ -570,6 +586,13 @@ public class TestNodeStatusUpdater {
       }
       return nhResponse;
     }
+
+    @Override
+    public UnRegisterNodeManagerResponse unRegisterNodeManager(
+        UnRegisterNodeManagerRequest request) throws YarnException, IOException {
+      return recordFactory
+          .newRecordInstance(UnRegisterNodeManagerResponse.class);
+    }
   }
 
   // Test NodeStatusUpdater sends the right container statuses each time it
@@ -738,6 +761,13 @@ public class TestNodeStatusUpdater {
       nhResponse.setSystemCredentialsForApps(appCredentials);
       return nhResponse;
     }
+
+    @Override
+    public UnRegisterNodeManagerResponse unRegisterNodeManager(
+        UnRegisterNodeManagerRequest request) throws YarnException, IOException {
+      return recordFactory
+          .newRecordInstance(UnRegisterNodeManagerResponse.class);
+    }
   }
 
   private class MyResourceTracker5 implements ResourceTracker {
@@ -768,6 +798,13 @@ public class TestNodeStatusUpdater {
           "NodeHeartbeat exception");
       }
     }
+
+    @Override
+    public UnRegisterNodeManagerResponse unRegisterNodeManager(
+        UnRegisterNodeManagerRequest request) throws YarnException, IOException {
+      return recordFactory
+          .newRecordInstance(UnRegisterNodeManagerResponse.class);
+    }
   }
 
   private class MyResourceTracker6 implements ResourceTracker {
@@ -820,6 +857,13 @@ public class TestNodeStatusUpdater {
               null, null, null, 1000L);
       return nhResponse;
     }
+
+    @Override
+    public UnRegisterNodeManagerResponse unRegisterNodeManager(
+        UnRegisterNodeManagerRequest request) throws YarnException, IOException {
+      return recordFactory
+          .newRecordInstance(UnRegisterNodeManagerResponse.class);
+    }
   }
 
   @Before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdaterForLabels.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdaterForLabels.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdaterForLabels.java
index a0ed39b..7e1bbd8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdaterForLabels.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdaterForLabels.java
@@ -40,6 +40,8 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerResponse;
 import org.apache.hadoop.yarn.server.api.records.MasterKey;
 import org.apache.hadoop.yarn.server.api.records.NodeAction;
 import org.apache.hadoop.yarn.server.api.records.NodeStatus;
@@ -181,6 +183,12 @@ public class TestNodeStatusUpdaterForLabels extends NodeLabelTestBase {
       }
       return nhResponse;
     }
+
+    @Override
+    public UnRegisterNodeManagerResponse unRegisterNodeManager(
+        UnRegisterNodeManagerRequest request) throws YarnException, IOException {
+      return null;
+    }
   }
 
   public static class DummyNodeLabelsProvider extends NodeLabelsProvider {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClusterMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClusterMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClusterMetrics.java
index 5fa36bc..1114dc0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClusterMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClusterMetrics.java
@@ -44,6 +44,7 @@ public class ClusterMetrics {
   @Metric("# of lost NMs") MutableGaugeInt numLostNMs;
   @Metric("# of unhealthy NMs") MutableGaugeInt numUnhealthyNMs;
   @Metric("# of Rebooted NMs") MutableGaugeInt numRebootedNMs;
+  @Metric("# of Shutdown NMs") MutableGaugeInt numShutdownNMs;
   @Metric("AM container launch delay") MutableRate aMLaunchDelay;
   @Metric("AM register delay") MutableRate aMRegisterDelay;
 
@@ -142,6 +143,19 @@ public class ClusterMetrics {
     numRebootedNMs.decr();
   }
 
+  // Shutdown NMs
+  public int getNumShutdownNMs() {
+    return numShutdownNMs.value();
+  }
+
+  public void incrNumShutdownNMs() {
+    numShutdownNMs.incr();
+  }
+
+  public void decrNumShutdownNMs() {
+    numShutdownNMs.decr();
+  }
+
   public void incrNumActiveNodes() {
     numActiveNMs.incr();
   }

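The new gauge follows the same increment/decrement pattern as the existing lost, unhealthy, and rebooted counters. A minimal sketch of how the RMNodeImpl transitions further below drive it (the call sites here are illustrative, not a new API):

    ClusterMetrics metrics = ClusterMetrics.getMetrics();
    metrics.incrNumShutdownNMs();          // node deactivated into SHUTDOWN
    int shutdownNMs = metrics.getNumShutdownNMs();
    metrics.decrNumShutdownNMs();          // node leaves SHUTDOWN again
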
http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
index acb690b..5e240e5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
@@ -59,6 +59,8 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerResponse;
 import org.apache.hadoop.yarn.server.api.records.MasterKey;
 import org.apache.hadoop.yarn.server.api.records.NodeAction;
 import org.apache.hadoop.yarn.server.api.records.NodeStatus;
@@ -510,6 +512,27 @@ public class ResourceTrackerService extends AbstractService implements
     return nodeHeartBeatResponse;
   }
 
+  @SuppressWarnings("unchecked")
+  @Override
+  public UnRegisterNodeManagerResponse unRegisterNodeManager(
+      UnRegisterNodeManagerRequest request) throws YarnException, IOException {
+    UnRegisterNodeManagerResponse response = recordFactory
+        .newRecordInstance(UnRegisterNodeManagerResponse.class);
+    NodeId nodeId = request.getNodeId();
+    RMNode rmNode = this.rmContext.getRMNodes().get(nodeId);
+    if (rmNode == null) {
+      LOG.info("Node not found, ignoring the unregister from node id : "
+          + nodeId);
+      return response;
+    }
+    LOG.info("Node with node id : " + nodeId
+        + " has shutdown, hence unregistering the node.");
+    this.nmLivelinessMonitor.unregister(nodeId);
+    this.rmContext.getDispatcher().getEventHandler()
+        .handle(new RMNodeEvent(nodeId, RMNodeEventType.SHUTDOWN));
+    return response;
+  }
+
   private void setAppCollectorsMapToResponse(
       List<ApplicationId> liveApps, NodeHeartbeatResponse response) {
     Map<ApplicationId, String> liveAppCollectorsMap = new

http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java
index 3ab54a7..27ba1c0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java
@@ -34,6 +34,7 @@ public enum RMNodeEventType {
   STATUS_UPDATE,
   REBOOTING,
   RECONNECTED,
+  SHUTDOWN,
 
   // Source: Application
   CLEANUP_APP,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
index a11aacf..1263692 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
@@ -168,6 +168,9 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
          RMNodeEventType.RECONNECTED, new ReconnectNodeTransition())
      .addTransition(NodeState.RUNNING, NodeState.RUNNING,
          RMNodeEventType.RESOURCE_UPDATE, new UpdateNodeResourceWhenRunningTransition())
+     .addTransition(NodeState.RUNNING, NodeState.SHUTDOWN,
+         RMNodeEventType.SHUTDOWN,
+         new DeactivateNodeTransition(NodeState.SHUTDOWN))
 
      //Transitions from REBOOTED state
      .addTransition(NodeState.REBOOTED, NodeState.REBOOTED,
@@ -215,6 +218,17 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
      .addTransition(NodeState.UNHEALTHY, NodeState.UNHEALTHY,
          RMNodeEventType.FINISHED_CONTAINERS_PULLED_BY_AM,
          new AddContainersToBeRemovedFromNMTransition())
+     .addTransition(NodeState.UNHEALTHY, NodeState.SHUTDOWN,
+         RMNodeEventType.SHUTDOWN,
+         new DeactivateNodeTransition(NodeState.SHUTDOWN))
+
+     //Transitions from SHUTDOWN state
+     .addTransition(NodeState.SHUTDOWN, NodeState.SHUTDOWN,
+         RMNodeEventType.RESOURCE_UPDATE,
+         new UpdateNodeResourceWhenUnusableTransition())
+     .addTransition(NodeState.SHUTDOWN, NodeState.SHUTDOWN,
+         RMNodeEventType.FINISHED_CONTAINERS_PULLED_BY_AM,
+         new AddContainersToBeRemovedFromNMTransition())
 
      // create the topology tables
      .installTopology(); 
@@ -450,6 +464,9 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
     case UNHEALTHY:
       metrics.decrNumUnhealthyNMs();
       break;
+    case SHUTDOWN:
+      metrics.decrNumShutdownNMs();
+      break;
     default:
       LOG.debug("Unexpected previous node state");    
     }
@@ -483,6 +500,9 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
     case UNHEALTHY:
       metrics.incrNumUnhealthyNMs();
       break;
+    case SHUTDOWN:
+      metrics.incrNumShutdownNMs();
+      break;
     default:
       LOG.debug("Unexpected final state");
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java
index 7ee2ca4..a5a9a7f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java
@@ -78,6 +78,7 @@ public class MetricsOverviewTable extends HtmlBlock {
         th().$class("ui-state-default")._("Lost Nodes")._().
         th().$class("ui-state-default")._("Unhealthy Nodes")._().
         th().$class("ui-state-default")._("Rebooted Nodes")._().
+        th().$class("ui-state-default")._("Shutdown Nodes")._().
       _().
     _().
     tbody().$class("ui-widget-content").
@@ -103,6 +104,7 @@ public class MetricsOverviewTable extends HtmlBlock {
         td().a(url("nodes/lost"),String.valueOf(clusterMetrics.getLostNodes()))._().
         td().a(url("nodes/unhealthy"),String.valueOf(clusterMetrics.getUnhealthyNodes()))._().
         td().a(url("nodes/rebooted"),String.valueOf(clusterMetrics.getRebootedNodes()))._().
+        td().a(url("nodes/shutdown"),String.valueOf(clusterMetrics.getShutdownNodes()))._().
       _().
     _()._();
     

http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
index a2bab0c..4214667 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
@@ -90,6 +90,7 @@ class NodesPage extends RmView {
         case DECOMMISSIONED:
         case LOST:
         case REBOOTED:
+        case SHUTDOWN:
           rmNodes = this.rm.getRMContext().getInactiveRMNodes().values();
           isInactive = true;
           break;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java
index 16a5c01..5ebae41 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java
@@ -57,6 +57,7 @@ public class ClusterMetricsInfo {
   protected int decommissionedNodes;
   protected int rebootedNodes;
   protected int activeNodes;
+  protected int shutdownNodes;
 
   public ClusterMetricsInfo() {
   } // JAXB needs this
@@ -92,8 +93,9 @@ public class ClusterMetricsInfo {
     this.unhealthyNodes = clusterMetrics.getUnhealthyNMs();
     this.decommissionedNodes = clusterMetrics.getNumDecommisionedNMs();
     this.rebootedNodes = clusterMetrics.getNumRebootedNMs();
+    this.shutdownNodes = clusterMetrics.getNumShutdownNMs();
     this.totalNodes = activeNodes + lostNodes + decommissionedNodes
-        + rebootedNodes + unhealthyNodes;
+        + rebootedNodes + unhealthyNodes + shutdownNodes;
   }
 
   public int getAppsSubmitted() {
@@ -188,4 +190,8 @@ public class ClusterMetricsInfo {
     return this.decommissionedNodes;
   }
 
+  public int getShutdownNodes() {
+    return this.shutdownNodes;
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
index fb9d2ef..01f4357 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
@@ -465,6 +465,20 @@ public class TestRMNodeTransitions {
     Assert.assertEquals(NodeState.REBOOTED, node.getState());
   }
 
+  @Test
+  public void testNMShutdown() {
+    RMNodeImpl node = getRunningNode();
+    node.handle(new RMNodeEvent(node.getNodeID(), RMNodeEventType.SHUTDOWN));
+    Assert.assertEquals(NodeState.SHUTDOWN, node.getState());
+  }
+
+  @Test
+  public void testUnhealthyNMShutdown() {
+    RMNodeImpl node = getUnhealthyNode();
+    node.handle(new RMNodeEvent(node.getNodeID(), RMNodeEventType.SHUTDOWN));
+    Assert.assertEquals(NodeState.SHUTDOWN, node.getState());
+  }
+
   @Test(timeout=20000)
   public void testUpdateHeartbeatResponseForCleanup() {
     RMNodeImpl node = getRunningNode();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
index 3474ed6..94a0e4c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerRequest;
 import org.apache.hadoop.yarn.server.api.records.NodeAction;
 import org.apache.hadoop.yarn.server.api.records.NodeStatus;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager;
@@ -921,7 +922,7 @@ public class TestResourceTrackerService extends NodeLabelTestBase {
         ClusterMetrics.getMetrics().getUnhealthyNMs());
   }
 
-  @SuppressWarnings("unchecked")
+  @SuppressWarnings({ "unchecked", "rawtypes" })
   @Test
   public void testHandleContainerStatusInvalidCompletions() throws Exception {
     rm = new MockRM(new YarnConfiguration());
@@ -1075,6 +1076,113 @@ public class TestResourceTrackerService extends NodeLabelTestBase {
 
   }
 
+  @Test
+  public void testNMUnregistration() throws Exception {
+    Configuration conf = new Configuration();
+    rm = new MockRM(conf);
+    rm.start();
+
+    ResourceTrackerService resourceTrackerService = rm
+        .getResourceTrackerService();
+    MockNM nm1 = rm.registerNode("host1:1234", 5120);
+
+    int shutdownNMsCount = ClusterMetrics.getMetrics()
+        .getNumShutdownNMs();
+    NodeHeartbeatResponse nodeHeartbeat = nm1.nodeHeartbeat(true);
+    Assert.assertEquals(NodeAction.NORMAL, nodeHeartbeat.getNodeAction());
+
+    UnRegisterNodeManagerRequest request = Records
+        .newRecord(UnRegisterNodeManagerRequest.class);
+    request.setNodeId(nm1.getNodeId());
+    resourceTrackerService.unRegisterNodeManager(request);
+    checkShutdownNMCount(rm, ++shutdownNMsCount);
+
+    // The RM should have removed the node after unregistration, hence it
+    // sends a resync command on the next heartbeat.
+    nodeHeartbeat = nm1.nodeHeartbeat(true);
+    Assert.assertEquals(NodeAction.RESYNC, nodeHeartbeat.getNodeAction());
+  }
+
+  @Test
+  public void testUnhealthyNMUnregistration() throws Exception {
+    Configuration conf = new Configuration();
+    rm = new MockRM(conf);
+    rm.start();
+
+    ResourceTrackerService resourceTrackerService = rm
+        .getResourceTrackerService();
+    MockNM nm1 = rm.registerNode("host1:1234", 5120);
+    Assert.assertEquals(0, ClusterMetrics.getMetrics().getUnhealthyNMs());
+    // node healthy
+    nm1.nodeHeartbeat(true);
+    int shutdownNMsCount = ClusterMetrics.getMetrics().getNumShutdownNMs();
+
+    // node unhealthy
+    nm1.nodeHeartbeat(false);
+    checkUnealthyNMCount(rm, nm1, true, 1);
+    UnRegisterNodeManagerRequest request = Records
+        .newRecord(UnRegisterNodeManagerRequest.class);
+    request.setNodeId(nm1.getNodeId());
+    resourceTrackerService.unRegisterNodeManager(request);
+    checkShutdownNMCount(rm, ++shutdownNMsCount);
+  }
+
+  @Test
+  public void testInvalidNMUnregistration() throws Exception {
+    Configuration conf = new Configuration();
+    rm = new MockRM(conf);
+    rm.start();
+    ResourceTrackerService resourceTrackerService = rm
+        .getResourceTrackerService();
+    int shutdownNMsCount = ClusterMetrics.getMetrics()
+        .getNumShutdownNMs();
+    int decommisionedNMsCount = ClusterMetrics.getMetrics()
+        .getNumDecommisionedNMs();
+
+    // Node not found for unregister
+    UnRegisterNodeManagerRequest request = Records
+        .newRecord(UnRegisterNodeManagerRequest.class);
+    request.setNodeId(BuilderUtils.newNodeId("host", 1234));
+    resourceTrackerService.unRegisterNodeManager(request);
+    checkShutdownNMCount(rm, 0);
+    checkDecommissionedNMCount(rm, 0);
+
+    // 1. Register the Node Manager
+    // 2. Exclude the same Node Manager host
+    // 3. Give NM heartbeat to RM
+    // 4. Unregister the Node Manager
+    MockNM nm1 = new MockNM("host1:1234", 5120, resourceTrackerService);
+    RegisterNodeManagerResponse response = nm1.registerNode();
+    Assert.assertEquals(NodeAction.NORMAL, response.getNodeAction());
+    writeToHostsFile("host2");
+    conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH,
+        hostFile.getAbsolutePath());
+    rm.getNodesListManager().refreshNodes(conf);
+    NodeHeartbeatResponse heartbeatResponse = nm1.nodeHeartbeat(true);
+    Assert.assertEquals(NodeAction.SHUTDOWN, heartbeatResponse.getNodeAction());
+    checkShutdownNMCount(rm, shutdownNMsCount);
+    checkDecommissionedNMCount(rm, ++decommisionedNMsCount);
+    request.setNodeId(nm1.getNodeId());
+    resourceTrackerService.unRegisterNodeManager(request);
+    checkShutdownNMCount(rm, shutdownNMsCount);
+    checkDecommissionedNMCount(rm, decommisionedNMsCount);
+
+    // 1. Register the Node Manager
+    // 2. Exclude the same Node Manager host
+    // 3. Unregister the Node Manager
+    MockNM nm2 = new MockNM("host2:1234", 5120, resourceTrackerService);
+    RegisterNodeManagerResponse response2 = nm2.registerNode();
+    Assert.assertEquals(NodeAction.NORMAL, response2.getNodeAction());
+    writeToHostsFile("host1");
+    conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH,
+        hostFile.getAbsolutePath());
+    rm.getNodesListManager().refreshNodes(conf);
+    request.setNodeId(nm2.getNodeId());
+    resourceTrackerService.unRegisterNodeManager(request);
+    checkShutdownNMCount(rm, shutdownNMsCount);
+    checkDecommissionedNMCount(rm, ++decommisionedNMsCount);
+  }
+
   private void writeToHostsFile(String... hosts) throws IOException {
     if (!hostFile.exists()) {
       TEMP_DIR.mkdirs();
@@ -1110,6 +1218,19 @@ public class TestResourceTrackerService extends NodeLabelTestBase {
         ClusterMetrics.getMetrics().getNumDecommisionedNMs());
   }
 
+  private void checkShutdownNMCount(MockRM rm, int count)
+      throws InterruptedException {
+    int waitCount = 0;
+    while (ClusterMetrics.getMetrics().getNumShutdownNMs() != count
+        && waitCount++ < 20) {
+      synchronized (this) {
+        wait(100);
+      }
+    }
+    Assert.assertEquals("The shutdown metrics are not updated", count,
+        ClusterMetrics.getMetrics().getNumShutdownNMs());
+  }
+
   @After
   public void tearDown() {
     if (hostFile != null && hostFile.exists()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java
index a002db7..7c6d9a0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java
@@ -40,7 +40,7 @@ import com.google.inject.Module;
 public class TestNodesPage {
   
   final int numberOfRacks = 2;
-  final int numberOfNodesPerRack = 7;
+  final int numberOfNodesPerRack = 8;
   // The following is because of the way TestRMWebApp.mockRMContext creates
   // nodes.
   final int numberOfLostNodesPerRack = numberOfNodesPerRack
@@ -48,7 +48,7 @@ public class TestNodesPage {
 
   // Number of Actual Table Headers for NodesPage.NodesBlock might change in
   // future. In that case this value should be adjusted to the new value.
-  final int numberOfThInMetricsTable = 20;
+  final int numberOfThInMetricsTable = 21;
   final int numberOfActualTableHeaders = 13;
 
   private Injector injector;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
index cd1d771..752e99b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
@@ -416,7 +416,8 @@ public class TestRMWebServices extends JerseyTestBase {
           WebServicesTestUtils.getXmlInt(element, "unhealthyNodes"),
           WebServicesTestUtils.getXmlInt(element, "decommissionedNodes"),
           WebServicesTestUtils.getXmlInt(element, "rebootedNodes"),
-          WebServicesTestUtils.getXmlInt(element, "activeNodes"));
+          WebServicesTestUtils.getXmlInt(element, "activeNodes"),
+          WebServicesTestUtils.getXmlInt(element, "shutdownNodes"));
     }
   }
 
@@ -424,7 +425,7 @@ public class TestRMWebServices extends JerseyTestBase {
       Exception {
     assertEquals("incorrect number of elements", 1, json.length());
     JSONObject clusterinfo = json.getJSONObject("clusterMetrics");
-    assertEquals("incorrect number of elements", 23, clusterinfo.length());
+    assertEquals("incorrect number of elements", 24, clusterinfo.length());
     verifyClusterMetrics(
         clusterinfo.getInt("appsSubmitted"), clusterinfo.getInt("appsCompleted"),
         clusterinfo.getInt("reservedMB"), clusterinfo.getInt("availableMB"),
@@ -435,16 +436,16 @@ public class TestRMWebServices extends JerseyTestBase {
         clusterinfo.getInt("totalMB"), clusterinfo.getInt("totalNodes"),
         clusterinfo.getInt("lostNodes"), clusterinfo.getInt("unhealthyNodes"),
         clusterinfo.getInt("decommissionedNodes"),
-        clusterinfo.getInt("rebootedNodes"),clusterinfo.getInt("activeNodes"));
+        clusterinfo.getInt("rebootedNodes"),clusterinfo.getInt("activeNodes"),
+        clusterinfo.getInt("shutdownNodes"));
   }
 
   public void verifyClusterMetrics(int submittedApps, int completedApps,
-      int reservedMB, int availableMB,
-      int allocMB, int reservedVirtualCores, int availableVirtualCores, 
-      int allocVirtualCores, int totalVirtualCores,
-      int containersAlloc, int totalMB, int totalNodes,
-      int lostNodes, int unhealthyNodes, int decommissionedNodes,
-      int rebootedNodes, int activeNodes) throws JSONException, Exception {
+      int reservedMB, int availableMB, int allocMB, int reservedVirtualCores,
+      int availableVirtualCores, int allocVirtualCores, int totalVirtualCores,
+      int containersAlloc, int totalMB, int totalNodes, int lostNodes,
+      int unhealthyNodes, int decommissionedNodes, int rebootedNodes,
+      int activeNodes, int shutdownNodes) throws JSONException, Exception {
 
     ResourceScheduler rs = rm.getResourceScheduler();
     QueueMetrics metrics = rs.getRootQueueMetrics();
@@ -488,6 +489,8 @@ public class TestRMWebServices extends JerseyTestBase {
         clusterMetrics.getNumRebootedNMs(), rebootedNodes);
     assertEquals("activeNodes doesn't match", clusterMetrics.getNumActiveNMs(),
         activeNodes);
+    assertEquals("shutdownNodes doesn't match",
+        clusterMetrics.getNumShutdownNMs(), shutdownNodes);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/868b9ce8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
index 7fb0698..991fcd7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
@@ -54,6 +54,8 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerResponse;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryStore;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.MemoryApplicationHistoryStore;
@@ -644,6 +646,14 @@ public class MiniYARNCluster extends CompositeService {
               }
               return response;
             }
+
+            @Override
+            public UnRegisterNodeManagerResponse unRegisterNodeManager(
+                UnRegisterNodeManagerRequest request) throws YarnException,
+                IOException {
+              return recordFactory
+                  .newRecordInstance(UnRegisterNodeManagerResponse.class);
+            }
           };
         }
 

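For context on the two hunks above: a node that unregisters itself is now
counted under the new shutdownNodes metric, and MiniYARNCluster's stub
ResourceTracker answers unRegisterNodeManager with an empty response. Below
is a minimal sketch of driving the stub from a test; everything outside the
diff (the resourceTracker handle, the nodeId, the setter on the request) is
assumed rather than taken from this commit:

    // Sketch only, not commit code.
    UnRegisterNodeManagerRequest req =
        Records.newRecord(UnRegisterNodeManagerRequest.class);
    req.setNodeId(nodeId);                      // NM that is shutting down
    resourceTracker.unRegisterNodeManager(req); // stub returns empty response
    // Once a real RM processes an unregistration, the counter checked by
    // verifyClusterMetrics above should move:
    assertEquals(1, ClusterMetrics.getMetrics().getNumShutdownNMs());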

[13/50] hadoop git commit: MAPREDUCE-6374. Distributed Cache File visibility should check permission of full path. Contributed by Chang Li

Posted by zj...@apache.org.
MAPREDUCE-6374. Distributed Cache File visibility should check permission of full path. Contributed by Chang Li


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/95dd42b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/95dd42b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/95dd42b4

Branch: refs/heads/YARN-2928
Commit: 95dd42b458d026371eb0c4109dff6d8da654f206
Parents: d6e1fd0
Author: Jason Lowe <jl...@apache.org>
Authored: Wed Jun 3 20:19:27 2015 +0000
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:43:14 2015 -0700

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt            |  3 +++
 .../ClientDistributedCacheManager.java          |  1 +
 .../TestClientDistributedCacheManager.java      | 28 ++++++++++++++++++++
 3 files changed, 32 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/95dd42b4/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index c0df835..ba94324 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -461,6 +461,9 @@ Release 2.8.0 - UNRELEASED
     MAPREDUCE-6204. TestJobCounters should use new properties instead of
     JobConf.MAPRED_TASK_JAVA_OPTS. (Sam Liu via ozawa)
 
+    MAPREDUCE-6374. Distributed Cache File visibility should check permission
+    of full path (Chang Li via jlowe)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95dd42b4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java
index 23f3cfc..c15e647 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java
@@ -236,6 +236,7 @@ public class ClientDistributedCacheManager {
       Map<URI, FileStatus> statCache) throws IOException {
     FileSystem fs = FileSystem.get(uri, conf);
     Path current = new Path(uri.getPath());
+    current = fs.makeQualified(current);
     //the leaf level file should be readable by others
     if (!checkPermissionOfOther(fs, current, FsAction.READ, statCache)) {
       return false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95dd42b4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/filecache/TestClientDistributedCacheManager.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/filecache/TestClientDistributedCacheManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/filecache/TestClientDistributedCacheManager.java
index 4824ba3..902cbfc 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/filecache/TestClientDistributedCacheManager.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/filecache/TestClientDistributedCacheManager.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.SequenceFile.CompressionType;
@@ -47,9 +48,13 @@ public class TestClientDistributedCacheManager {
       new File(System.getProperty("test.build.data", "/tmp")).toURI()
       .toString().replace(' ', '+');
   
+  private static final String TEST_VISIBILITY_DIR =
+      new File(TEST_ROOT_DIR, "TestCacheVisibility").toURI()
+      .toString().replace(' ', '+');
   private FileSystem fs;
   private Path firstCacheFile;
   private Path secondCacheFile;
+  private Path thirdCacheFile;
   private Configuration conf;
   
   @Before
@@ -58,8 +63,10 @@ public class TestClientDistributedCacheManager {
     fs = FileSystem.get(conf);
     firstCacheFile = new Path(TEST_ROOT_DIR, "firstcachefile");
     secondCacheFile = new Path(TEST_ROOT_DIR, "secondcachefile");
+    thirdCacheFile = new Path(TEST_VISIBILITY_DIR,"thirdCachefile");
     createTempFile(firstCacheFile, conf);
     createTempFile(secondCacheFile, conf);
+    createTempFile(thirdCacheFile, conf);
   }
   
   @After
@@ -70,6 +77,9 @@ public class TestClientDistributedCacheManager {
     if (!fs.delete(secondCacheFile, false)) {
       LOG.warn("Failed to delete secondcachefile");
     }
+    if (!fs.delete(thirdCacheFile, false)) {
+      LOG.warn("Failed to delete thirdCachefile");
+    }
   }
   
   @Test
@@ -93,6 +103,24 @@ public class TestClientDistributedCacheManager {
     Assert.assertEquals(expected, jobConf.get(MRJobConfig.CACHE_FILE_TIMESTAMPS));
   }
   
+  @Test
+  public void testDetermineCacheVisibilities() throws IOException {
+    Path workingdir = new Path(TEST_VISIBILITY_DIR);
+    fs.setWorkingDirectory(workingdir);
+    fs.setPermission(workingdir, new FsPermission((short)00777));
+    fs.setPermission(new Path(TEST_ROOT_DIR), new FsPermission((short)00700));
+    Job job = Job.getInstance(conf);
+    Path relativePath = new Path("thirdCachefile");
+    job.addCacheFile(relativePath.toUri());
+    Configuration jobConf = job.getConfiguration();
+
+    Map<URI, FileStatus> statCache = new HashMap<URI, FileStatus>();
+    ClientDistributedCacheManager.
+        determineCacheVisibilities(jobConf, statCache);
+    Assert.assertFalse(jobConf.
+               getBoolean(MRJobConfig.CACHE_FILE_VISIBILITIES,true));
+  }
+
   @SuppressWarnings("deprecation")
   void createTempFile(Path p, Configuration conf) throws IOException {
     SequenceFile.Writer writer = null;

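Why the one-line fix works: a cache file added with a relative URI resolves
against the FileSystem working directory, and the visibility check walks
every ancestor directory. Without qualifying the path first, the walk starts
from the bare relative path and can miss a restrictive ancestor. A hedged
illustration using the paths from the new test (not commit code):

    // TEST_VISIBILITY_DIR is the working directory; TEST_ROOT_DIR is 0700.
    Path current = new Path("thirdCachefile");   // relative, as in the test
    current = fs.makeQualified(current);
    // current is now e.g. file:/tmp/TestCacheVisibility/thirdCachefile, so
    // checkPermissionOfOther() also sees the 0700 on TEST_ROOT_DIR and the
    // file is correctly treated as non-public.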

[10/50] hadoop git commit: HDFS-8523. Remove usage information on unsupported operation 'fsck -showprogress' from branch-2 (Contributed by J.Andreina)

Posted by zj...@apache.org.
HDFS-8523. Remove usage information on unsupported operation 'fsck -showprogress' from branch-2 (Contributed by J.Andreina)

Merged CHANGES.txt from branch-2.7

(cherry picked from commit 0ed9c2d8fec93b5dac9c305eda272ad8dfd869a9)

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md

(cherry picked from commit dd98cfd328dddb01a1220786d28a80195021611b)

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6de67969
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6de67969
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6de67969

Branch: refs/heads/YARN-2928
Commit: 6de679697cc91d4a337d420b5e4e5ad994df150b
Parents: b8dd317
Author: Vinayakumar B <vi...@apache.org>
Authored: Wed Jun 3 15:15:44 2015 +0530
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:43:13 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
 1 file changed, 3 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6de67969/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8cbe0e5..3e25129 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -942,6 +942,9 @@ Release 2.7.1 - UNRELEASED
     HDFS-8270. create() always retried with hardcoded timeout when file already
     exists with open lease (J.Andreina via vinayakumarb)
 
+    HDFS-8523. Remove usage information on unsupported operation
+    "fsck -showprogress" from branch-2 (J.Andreina via vinayakumarb)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES


[25/50] hadoop git commit: HADOOP-12058. Fix dead links to DistCp and Hadoop Archives pages. Contributed by Kazuho Fujii.

Posted by zj...@apache.org.
HADOOP-12058. Fix dead links to DistCp and Hadoop Archives pages. Contributed by Kazuho Fujii.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea1a48ad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea1a48ad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea1a48ad

Branch: refs/heads/YARN-2928
Commit: ea1a48adee9c865785d93551d2dd0db65368adbd
Parents: 868b9ce
Author: Akira Ajisaka <aa...@apache.org>
Authored: Fri Jun 5 01:45:34 2015 +0900
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:56:56 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                  | 3 +++
 .../hadoop-common/src/site/markdown/CommandsManual.md            | 4 ++--
 .../src/site/markdown/MapredCommands.md                          | 4 ++--
 3 files changed, 7 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea1a48ad/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index cf35cfe..3bca0bc 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -858,6 +858,9 @@ Release 2.7.1 - UNRELEASED
     HADOOP-11934. Use of JavaKeyStoreProvider in LdapGroupsMapping causes
     infinite loop. (Larry McCay via cnauroth)
 
+    HADOOP-12058. Fix dead links to DistCp and Hadoop Archives pages.
+    (Kazuho Fujii via aajisaka)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea1a48ad/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
index 35081a6..d7f0657 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
@@ -98,7 +98,7 @@ Commands useful for users of a hadoop cluster.
 
 ### `archive`
 
-Creates a hadoop archive. More information can be found at [Hadoop Archives Guide](../../hadoop-mapreduce-client/hadoop-mapreduce-client-core/HadoopArchives.html).
+Creates a hadoop archive. More information can be found at [Hadoop Archives Guide](../../hadoop-archives/HadoopArchives.html).
 
 ### `checknative`
 
@@ -157,7 +157,7 @@ Change the ownership and permissions on many files at once.
 
 ### `distcp`
 
-Copy file or directories recursively. More information can be found at [Hadoop DistCp Guide](../../hadoop-mapreduce-client/hadoop-mapreduce-client-core/DistCp.html).
+Copy file or directories recursively. More information can be found at [Hadoop DistCp Guide](../../hadoop-distcp/DistCp.html).
 
 ### `fs`
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea1a48ad/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredCommands.md
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredCommands.md b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredCommands.md
index ab0dc9d..9ccee60 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredCommands.md
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapredCommands.md
@@ -51,7 +51,7 @@ Commands useful for users of a hadoop cluster.
 ### `archive`
 
 Creates a hadoop archive. More information can be found at
-[Hadoop Archives Guide](./HadoopArchives.html).
+[Hadoop Archives Guide](../../hadoop-archives/HadoopArchives.html).
 
 ### `classpath`
 
@@ -62,7 +62,7 @@ Usage: `mapred classpath`
 ### `distcp`
 
 Copy file or directories recursively. More information can be found at
-[Hadoop DistCp Guide](./DistCp.html).
+[Hadoop DistCp Guide](../../hadoop-distcp/DistCp.html).
 
 ### `job`
 


[16/50] hadoop git commit: YARN-3749. We should make a copy of the configuration when initializing MiniYARNCluster with multiple RMs. Contributed by Chun Chen

Posted by zj...@apache.org.
YARN-3749. We should make a copy of the configuration when initializing
MiniYARNCluster with multiple RMs. Contributed by Chun Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d39b344
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d39b344
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d39b344

Branch: refs/heads/YARN-2928
Commit: 8d39b344afe459309d73fe647e3459addf0415a4
Parents: 2e58586
Author: Xuan <xg...@apache.org>
Authored: Wed Jun 3 17:20:15 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:43:15 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 ++
 .../hadoop/yarn/conf/YarnConfiguration.java     |  2 +-
 .../hadoop/yarn/client/ProtocolHATestBase.java  | 26 ++------------
 ...estApplicationMasterServiceProtocolOnHA.java | 10 +++---
 .../hadoop/yarn/client/TestRMFailover.java      | 26 ++------------
 .../hadoop/yarn/conf/TestYarnConfiguration.java | 22 ++++++++++++
 .../ApplicationMasterService.java               | 19 +++++-----
 .../yarn/server/resourcemanager/HATestUtil.java | 38 ++++++++++++++++++++
 .../resourcemanager/TestRMEmbeddedElector.java  | 24 ++-----------
 .../hadoop/yarn/server/MiniYARNCluster.java     | 22 +++++++-----
 .../hadoop/yarn/server/TestMiniYarnCluster.java | 37 +++++++++++++++++++
 11 files changed, 138 insertions(+), 91 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d39b344/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 95a2325..61cc501 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -572,6 +572,9 @@ Release 2.8.0 - UNRELEASED
 
     YARN-3762. FairScheduler: CME on FSParentQueue#getQueueUserAclInfo. (kasha)
 
+    YARN-3749. We should make a copy of configuration when init MiniYARNCluster
+    with multiple RMs. (Chun Chen via xgong)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d39b344/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index ba07c80..e4ae2b7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1991,7 +1991,7 @@ public class YarnConfiguration extends Configuration {
   public InetSocketAddress updateConnectAddr(String name,
                                              InetSocketAddress addr) {
     String prefix = name;
-    if (HAUtil.isHAEnabled(this)) {
+    if (HAUtil.isHAEnabled(this) && getServiceAddressConfKeys(this).contains(name)) {
       prefix = HAUtil.addSuffix(prefix, HAUtil.getRMHAId(this));
     }
     return super.updateConnectAddr(prefix, addr);

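The guard narrows HA suffixing to the RM's own service address keys, so
NodeManager addresses written back through updateConnectAddr no longer pick
up an rm-id suffix. A sketch of the effective logic (illustrative only):

    String key = YarnConfiguration.NM_LOCALIZER_ADDRESS;
    if (HAUtil.isHAEnabled(conf)
        && YarnConfiguration.getServiceAddressConfKeys(conf).contains(key)) {
      // only RM service addresses take the suffix, e.g. "...address.rm1"
      key = HAUtil.addSuffix(key, HAUtil.getRMHAId(conf));
    }
    // NM_LOCALIZER_ADDRESS is not in the RM service-address key set, so it
    // stays unsuffixed -- the case the TestYarnConfiguration hunk below
    // asserts with assertNull(...).
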
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d39b344/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
index 903dd94..75e6cee 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
@@ -36,6 +36,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
 
+import org.apache.hadoop.yarn.server.resourcemanager.HATestUtil;
 import org.junit.Assert;
 
 import org.apache.hadoop.conf.Configuration;
@@ -101,7 +102,6 @@ import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
 import org.apache.hadoop.yarn.client.api.YarnClient;
-import org.apache.hadoop.yarn.conf.HAUtil;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.MiniYARNCluster;
@@ -161,26 +161,6 @@ public abstract class ProtocolHATestBase extends ClientBaseWithFixes {
   protected Thread failoverThread = null;
   private volatile boolean keepRunning;
 
-  private void setConfForRM(String rmId, String prefix, String value) {
-    conf.set(HAUtil.addSuffix(prefix, rmId), value);
-  }
-
-  private void setRpcAddressForRM(String rmId, int base) {
-    setConfForRM(rmId, YarnConfiguration.RM_ADDRESS, "0.0.0.0:" +
-        (base + YarnConfiguration.DEFAULT_RM_PORT));
-    setConfForRM(rmId, YarnConfiguration.RM_SCHEDULER_ADDRESS, "0.0.0.0:" +
-        (base + YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT));
-    setConfForRM(rmId, YarnConfiguration.RM_ADMIN_ADDRESS, "0.0.0.0:" +
-        (base + YarnConfiguration.DEFAULT_RM_ADMIN_PORT));
-    setConfForRM(rmId, YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
-        "0.0.0.0:" + (base + YarnConfiguration
-            .DEFAULT_RM_RESOURCE_TRACKER_PORT));
-    setConfForRM(rmId, YarnConfiguration.RM_WEBAPP_ADDRESS, "0.0.0.0:" +
-        (base + YarnConfiguration.DEFAULT_RM_WEBAPP_PORT));
-    setConfForRM(rmId, YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS, "0.0.0.0:" +
-        (base + YarnConfiguration.DEFAULT_RM_WEBAPP_HTTPS_PORT));
-  }
-
   @Before
   public void setup() throws IOException {
     failoverThread = null;
@@ -189,8 +169,8 @@ public abstract class ProtocolHATestBase extends ClientBaseWithFixes {
     conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
     conf.setInt(YarnConfiguration.CLIENT_FAILOVER_MAX_ATTEMPTS, 5);
     conf.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + "," + RM2_NODE_ID);
-    setRpcAddressForRM(RM1_NODE_ID, RM1_PORT_BASE);
-    setRpcAddressForRM(RM2_NODE_ID, RM2_PORT_BASE);
+    HATestUtil.setRpcAddressForRM(RM1_NODE_ID, RM1_PORT_BASE, conf);
+    HATestUtil.setRpcAddressForRM(RM2_NODE_ID, RM2_PORT_BASE, conf);
 
     conf.setLong(YarnConfiguration.CLIENT_FAILOVER_SLEEPTIME_BASE_MS, 100L);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d39b344/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationMasterServiceProtocolOnHA.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationMasterServiceProtocolOnHA.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationMasterServiceProtocolOnHA.java
index 41e1800..ad86fb3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationMasterServiceProtocolOnHA.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationMasterServiceProtocolOnHA.java
@@ -54,18 +54,18 @@ public class TestApplicationMasterServiceProtocolOnHA
   public void initialize() throws Exception {
     startHACluster(0, false, false, true);
     attemptId = this.cluster.createFakeApplicationAttemptId();
-    amClient = ClientRMProxy
-        .createRMProxy(this.conf, ApplicationMasterProtocol.class);
 
     Token<AMRMTokenIdentifier> appToken =
         this.cluster.getResourceManager().getRMContext()
           .getAMRMTokenSecretManager().createAndGetAMRMToken(attemptId);
-    appToken.setService(ClientRMProxy.getAMRMTokenService(conf));
+    appToken.setService(ClientRMProxy.getAMRMTokenService(this.conf));
     UserGroupInformation.setLoginUser(UserGroupInformation
-        .createRemoteUser(UserGroupInformation.getCurrentUser()
-            .getUserName()));
+        .createRemoteUser(UserGroupInformation.getCurrentUser().getUserName()));
     UserGroupInformation.getCurrentUser().addToken(appToken);
     syncToken(appToken);
+
+    amClient = ClientRMProxy
+        .createRMProxy(this.conf, ApplicationMasterProtocol.class);
   }
 
   @After

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d39b344/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
index cd22743..0d03fd4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
@@ -38,11 +38,11 @@ import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.service.Service.STATE;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.client.api.YarnClient;
-import org.apache.hadoop.yarn.conf.HAUtil;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.MiniYARNCluster;
 import org.apache.hadoop.yarn.server.resourcemanager.AdminService;
+import org.apache.hadoop.yarn.server.resourcemanager.HATestUtil;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer;
 import org.junit.After;
@@ -66,34 +66,14 @@ public class TestRMFailover extends ClientBaseWithFixes {
   private MiniYARNCluster cluster;
   private ApplicationId fakeAppId;
 
-
-  private void setConfForRM(String rmId, String prefix, String value) {
-    conf.set(HAUtil.addSuffix(prefix, rmId), value);
-  }
-
-  private void setRpcAddressForRM(String rmId, int base) {
-    setConfForRM(rmId, YarnConfiguration.RM_ADDRESS, "0.0.0.0:" +
-        (base + YarnConfiguration.DEFAULT_RM_PORT));
-    setConfForRM(rmId, YarnConfiguration.RM_SCHEDULER_ADDRESS, "0.0.0.0:" +
-        (base + YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT));
-    setConfForRM(rmId, YarnConfiguration.RM_ADMIN_ADDRESS, "0.0.0.0:" +
-        (base + YarnConfiguration.DEFAULT_RM_ADMIN_PORT));
-    setConfForRM(rmId, YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "0.0.0.0:" +
-        (base + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT));
-    setConfForRM(rmId, YarnConfiguration.RM_WEBAPP_ADDRESS, "0.0.0.0:" +
-        (base + YarnConfiguration.DEFAULT_RM_WEBAPP_PORT));
-    setConfForRM(rmId, YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS, "0.0.0.0:" +
-        (base + YarnConfiguration.DEFAULT_RM_WEBAPP_HTTPS_PORT));
-  }
-
   @Before
   public void setup() throws IOException {
     fakeAppId = ApplicationId.newInstance(System.currentTimeMillis(), 0);
     conf = new YarnConfiguration();
     conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
     conf.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + "," + RM2_NODE_ID);
-    setRpcAddressForRM(RM1_NODE_ID, RM1_PORT_BASE);
-    setRpcAddressForRM(RM2_NODE_ID, RM2_PORT_BASE);
+    HATestUtil.setRpcAddressForRM(RM1_NODE_ID, RM1_PORT_BASE, conf);
+    HATestUtil.setRpcAddressForRM(RM2_NODE_ID, RM2_PORT_BASE, conf);
 
     conf.setLong(YarnConfiguration.CLIENT_FAILOVER_SLEEPTIME_BASE_MS, 100L);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d39b344/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfiguration.java
index 1d925a7..1f10810 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfiguration.java
@@ -27,6 +27,7 @@ import java.net.InetSocketAddress;
 import java.net.SocketAddress;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertFalse;
 
@@ -202,5 +203,26 @@ public class TestYarnConfiguration {
         serverAddress);
 
     assertTrue(resourceTrackerConnectAddress.toString().startsWith("yo.yo.yo"));
+
+    //tests updateConnectAddr won't add suffix to NM service address configurations
+    conf = new YarnConfiguration();
+    conf.set(YarnConfiguration.NM_LOCALIZER_ADDRESS, "yo.yo.yo");
+    conf.set(YarnConfiguration.NM_BIND_HOST, "0.0.0.0");
+    conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
+    conf.set(YarnConfiguration.RM_HA_ID, "rm1");
+
+    serverAddress = new InetSocketAddress(
+        YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS.split(":")[0],
+        Integer.valueOf(YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS.split(":")[1]));
+
+    InetSocketAddress localizerAddress = conf.updateConnectAddr(
+        YarnConfiguration.NM_BIND_HOST,
+        YarnConfiguration.NM_LOCALIZER_ADDRESS,
+        YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS,
+        serverAddress);
+
+    assertTrue(localizerAddress.toString().startsWith("yo.yo.yo"));
+    assertNull(conf.get(
+        HAUtil.addSuffix(YarnConfiguration.NM_LOCALIZER_ADDRESS, "rm1")));
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d39b344/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
index f5474bf..3637f91 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
@@ -107,7 +107,7 @@ public class ApplicationMasterService extends AbstractService implements
   private static final Log LOG = LogFactory.getLog(ApplicationMasterService.class);
   private final AMLivelinessMonitor amLivelinessMonitor;
   private YarnScheduler rScheduler;
-  private InetSocketAddress bindAddress;
+  private InetSocketAddress masterServiceAddress;
   private Server server;
   private final RecordFactory recordFactory =
       RecordFactoryProvider.getRecordFactory(null);
@@ -123,15 +123,18 @@ public class ApplicationMasterService extends AbstractService implements
   }
 
   @Override
-  protected void serviceStart() throws Exception {
-    Configuration conf = getConfig();
-    YarnRPC rpc = YarnRPC.create(conf);
-
-    InetSocketAddress masterServiceAddress = conf.getSocketAddr(
+  protected void serviceInit(Configuration conf) throws Exception {
+    masterServiceAddress = conf.getSocketAddr(
         YarnConfiguration.RM_BIND_HOST,
         YarnConfiguration.RM_SCHEDULER_ADDRESS,
         YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS,
         YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT);
+  }
+
+  @Override
+  protected void serviceStart() throws Exception {
+    Configuration conf = getConfig();
+    YarnRPC rpc = YarnRPC.create(conf);
 
     Configuration serverConf = conf;
     // If the auth is not-simple, enforce it to be token-based.
@@ -160,7 +163,7 @@ public class ApplicationMasterService extends AbstractService implements
     }
     
     this.server.start();
-    this.bindAddress =
+    this.masterServiceAddress =
         conf.updateConnectAddr(YarnConfiguration.RM_BIND_HOST,
                                YarnConfiguration.RM_SCHEDULER_ADDRESS,
                                YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS,
@@ -170,7 +173,7 @@ public class ApplicationMasterService extends AbstractService implements
 
   @Private
   public InetSocketAddress getBindAddress() {
-    return this.bindAddress;
+    return this.masterServiceAddress;
   }
 
   // Obtain the needed AMRMTokenIdentifier from the remote-UGI. RPC layer

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d39b344/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/HATestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/HATestUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/HATestUtil.java
new file mode 100644
index 0000000..710ce87
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/HATestUtil.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.HAUtil;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+
+public class HATestUtil {
+
+  public static void setRpcAddressForRM(String rmId, int base,
+      Configuration conf) {
+    for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
+      setConfForRM(rmId, confKey, "0.0.0.0:" + (base +
+          YarnConfiguration.getRMDefaultPortNumber(confKey, conf)), conf);
+    }
+  }
+
+  public static void setConfForRM(String rmId, String prefix, String value,
+      Configuration conf) {
+    conf.set(HAUtil.addSuffix(prefix, rmId), value);
+  }
+}

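A worked example of what the helper writes, assuming the stock default ports
(DEFAULT_RM_PORT is 8032); the numbers line up with the assertions in
testMultiRMConf further down:

    HATestUtil.setRpcAddressForRM("rm1", 10000, conf);
    // writes e.g. yarn.resourcemanager.address.rm1 = 0.0.0.0:18032
    HATestUtil.setRpcAddressForRM("rm2", 20000, conf);
    // writes e.g. yarn.resourcemanager.address.rm2 = 0.0.0.0:28032
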
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d39b344/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
index 0d9ee6d..1b0bf7e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
@@ -22,7 +22,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.ClientBaseWithFixes;
 import org.apache.hadoop.ha.ServiceFailedException;
-import org.apache.hadoop.yarn.conf.HAUtil;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.junit.Before;
 import org.junit.Test;
@@ -42,25 +41,6 @@ public class TestRMEmbeddedElector extends ClientBaseWithFixes {
   private Configuration conf;
   private AtomicBoolean callbackCalled;
 
-  private void setConfForRM(String rmId, String prefix, String value) {
-    conf.set(HAUtil.addSuffix(prefix, rmId), value);
-  }
-
-  private void setRpcAddressForRM(String rmId, int base) {
-    setConfForRM(rmId, YarnConfiguration.RM_ADDRESS, "0.0.0.0:" +
-        (base + YarnConfiguration.DEFAULT_RM_PORT));
-    setConfForRM(rmId, YarnConfiguration.RM_SCHEDULER_ADDRESS, "0.0.0.0:" +
-        (base + YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT));
-    setConfForRM(rmId, YarnConfiguration.RM_ADMIN_ADDRESS, "0.0.0.0:" +
-        (base + YarnConfiguration.DEFAULT_RM_ADMIN_PORT));
-    setConfForRM(rmId, YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "0.0.0.0:" +
-        (base + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT));
-    setConfForRM(rmId, YarnConfiguration.RM_WEBAPP_ADDRESS, "0.0.0.0:" +
-        (base + YarnConfiguration.DEFAULT_RM_WEBAPP_PORT));
-    setConfForRM(rmId, YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS, "0.0.0.0:" +
-        (base + YarnConfiguration.DEFAULT_RM_WEBAPP_HTTPS_PORT));
-  }
-
   @Before
   public void setup() throws IOException {
     conf = new YarnConfiguration();
@@ -73,8 +53,8 @@ public class TestRMEmbeddedElector extends ClientBaseWithFixes {
 
     conf.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + "," + RM2_NODE_ID);
     conf.set(YarnConfiguration.RM_HA_ID, RM1_NODE_ID);
-    setRpcAddressForRM(RM1_NODE_ID, RM1_PORT_BASE);
-    setRpcAddressForRM(RM2_NODE_ID, RM2_PORT_BASE);
+    HATestUtil.setRpcAddressForRM(RM1_NODE_ID, RM1_PORT_BASE, conf);
+    HATestUtil.setRpcAddressForRM(RM2_NODE_ID, RM2_PORT_BASE, conf);
 
     conf.setLong(YarnConfiguration.CLIENT_FAILOVER_SLEEPTIME_BASE_MS, 100L);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d39b344/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
index 65c2760..7fb0698 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
@@ -287,10 +287,12 @@ public class MiniYARNCluster extends CompositeService {
   }
 
   private synchronized void initResourceManager(int index, Configuration conf) {
-    if (HAUtil.isHAEnabled(conf)) {
-      conf.set(YarnConfiguration.RM_HA_ID, rmIds[index]);
+    Configuration newConf = resourceManagers.length > 1 ?
+        new YarnConfiguration(conf) : conf;
+    if (HAUtil.isHAEnabled(newConf)) {
+      newConf.set(YarnConfiguration.RM_HA_ID, rmIds[index]);
     }
-    resourceManagers[index].init(conf);
+    resourceManagers[index].init(newConf);
     resourceManagers[index].getRMContext().getDispatcher().register(
         RMAppAttemptEventType.class,
         new EventHandler<RMAppAttemptEvent>() {
@@ -329,10 +331,11 @@ public class MiniYARNCluster extends CompositeService {
     } catch (Throwable t) {
       throw new YarnRuntimeException(t);
     }
+    Configuration conf = resourceManagers[index].getConfig();
     LOG.info("MiniYARN ResourceManager address: " +
-        getConfig().get(YarnConfiguration.RM_ADDRESS));
+        conf.get(YarnConfiguration.RM_ADDRESS));
     LOG.info("MiniYARN ResourceManager web address: " +
-        WebAppUtils.getRMWebAppURLWithoutScheme(getConfig()));
+        WebAppUtils.getRMWebAppURLWithoutScheme(conf));
   }
 
   @InterfaceAudience.Private
@@ -352,7 +355,6 @@ public class MiniYARNCluster extends CompositeService {
       resourceManagers[index].stop();
       resourceManagers[index] = null;
     }
-    Configuration conf = getConfig();
     resourceManagers[index] = new ResourceManager();
     initResourceManager(index, getConfig());
     startResourceManager(index);
@@ -433,6 +435,7 @@ public class MiniYARNCluster extends CompositeService {
   private class ResourceManagerWrapper extends AbstractService {
     private int index;
 
+
     public ResourceManagerWrapper(int i) {
       super(ResourceManagerWrapper.class.getName() + "_" + i);
       index = i;
@@ -448,10 +451,11 @@ public class MiniYARNCluster extends CompositeService {
     @Override
     protected synchronized void serviceStart() throws Exception {
       startResourceManager(index);
+      Configuration conf = resourceManagers[index].getConfig();
       LOG.info("MiniYARN ResourceManager address: " +
-               getConfig().get(YarnConfiguration.RM_ADDRESS));
-      LOG.info("MiniYARN ResourceManager web address: " +
-               WebAppUtils.getRMWebAppURLWithoutScheme(getConfig()));
+          conf.get(YarnConfiguration.RM_ADDRESS));
+      LOG.info("MiniYARN ResourceManager web address: " + WebAppUtils
+          .getRMWebAppURLWithoutScheme(conf));
       super.serviceStart();
     }
 

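The copy-on-init is the heart of the fix: with two RMs sharing one
Configuration object, the second initResourceManager call used to clobber
the first RM's RM_HA_ID. A sketch of the old failure mode (illustrative,
not commit code):

    Configuration shared = new YarnConfiguration();
    shared.set(YarnConfiguration.RM_HA_ID, "rm1"); // init of RM index 0
    shared.set(YarnConfiguration.RM_HA_ID, "rm2"); // init of RM index 1
    // Both RMs now read RM_HA_ID as "rm2". The diff avoids this by handing
    // each RM its own new YarnConfiguration(conf) when more than one RM is
    // configured, as testMultiRMConf below verifies.
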
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d39b344/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYarnCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYarnCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYarnCluster.java
index 8a3c9e7..3e35bd0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYarnCluster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYarnCluster.java
@@ -19,7 +19,9 @@
 package org.apache.hadoop.yarn.server;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.HAUtil;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.HATestUtil;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -112,4 +114,39 @@ public class TestMiniYarnCluster {
       }
     }
   }
+
+  @Test
+  public void testMultiRMConf() {
+    String RM1_NODE_ID = "rm1", RM2_NODE_ID = "rm2";
+    int RM1_PORT_BASE = 10000, RM2_PORT_BASE = 20000;
+    Configuration conf = new YarnConfiguration();
+    conf.set(YarnConfiguration.RM_CLUSTER_ID, "yarn-test-cluster");
+    conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
+    conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
+    conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
+    conf.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + "," + RM2_NODE_ID);
+    HATestUtil.setRpcAddressForRM(RM1_NODE_ID, RM1_PORT_BASE, conf);
+    HATestUtil.setRpcAddressForRM(RM2_NODE_ID, RM2_PORT_BASE, conf);
+    conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
+    conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_USE_RPC, true);
+
+    MiniYARNCluster cluster =
+        new MiniYARNCluster(TestMiniYarnCluster.class.getName(),
+            2, 0, 1, 1);
+    cluster.init(conf);
+    Configuration conf1 = cluster.getResourceManager(0).getConfig(),
+        conf2 = cluster.getResourceManager(1).getConfig();
+    Assert.assertFalse(conf1 == conf2);
+    Assert.assertEquals("0.0.0.0:18032",
+        conf1.get(HAUtil.addSuffix(YarnConfiguration.RM_ADDRESS, RM1_NODE_ID)));
+    Assert.assertEquals("0.0.0.0:28032",
+        conf1.get(HAUtil.addSuffix(YarnConfiguration.RM_ADDRESS, RM2_NODE_ID)));
+    Assert.assertEquals("rm1", conf1.get(YarnConfiguration.RM_HA_ID));
+
+    Assert.assertEquals("0.0.0.0:18032",
+        conf2.get(HAUtil.addSuffix(YarnConfiguration.RM_ADDRESS, RM1_NODE_ID)));
+    Assert.assertEquals("0.0.0.0:28032",
+        conf2.get(HAUtil.addSuffix(YarnConfiguration.RM_ADDRESS, RM2_NODE_ID)));
+    Assert.assertEquals("rm2", conf2.get(YarnConfiguration.RM_HA_ID));
+  }
 }


[32/50] hadoop git commit: MAPREDUCE-6377. JHS sorting on state column not working in webUi. Contributed by zhihai xu.

Posted by zj...@apache.org.
MAPREDUCE-6377. JHS sorting on state column not working in webUi.
Contributed by zhihai xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c8f7f173
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c8f7f173
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c8f7f173

Branch: refs/heads/YARN-2928
Commit: c8f7f173778206e3aab05fe30572426bc6ae4001
Parents: 42ba35b
Author: Devaraj K <de...@apache.org>
Authored: Fri Jun 5 15:50:16 2015 +0530
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:56:58 2015 -0700

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt                            | 3 +++
 .../java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsView.java   | 5 +++--
 2 files changed, 6 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8f7f173/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 391303e..d78fb9c 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -474,6 +474,9 @@ Release 2.8.0 - UNRELEASED
     MAPREDUCE-6382. Don't escape HTML links in Diagnostics in JHS job overview.
     (Siqi Li via gera)
 
+    MAPREDUCE-6377. JHS sorting on state column not working in webUi.
+    (zhihai xu via devaraj)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8f7f173/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsView.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsView.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsView.java
index 59b7aa6..229bbb0 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsView.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsView.java
@@ -87,10 +87,11 @@ public class HsView extends TwoColumnLayout {
         append(", bProcessing: true").
 
         // Sort by id upon page load
-        append(", aaSorting: [[2, 'desc']]").
+        append(", aaSorting: [[3, 'desc']]").
         append(", aoColumnDefs:[").
         // Maps Total, Maps Completed, Reduces Total and Reduces Completed
-        append("{'sType':'numeric', 'bSearchable': false, 'aTargets': [ 7, 8, 9, 10 ] }").
+        append("{'sType':'numeric', 'bSearchable': false" +
+            ", 'aTargets': [ 8, 9, 10, 11 ] }").
         append("]}").
         toString();
   }

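The fix is an index shift: the DataTables configuration is zero-based and
had fallen one column behind the rendered job table, so the State column
sorted on a neighbouring column's data. The mapping implied by the new
values (column list inferred from the diff, not spelled out in the commit):

    // aaSorting [[3, 'desc']]   -> sort by the Job ID column on page load
    // aTargets  [8, 9, 10, 11]  -> Maps Total/Completed and Reduces
    //                              Total/Completed, treated as numeric and
    //                              excluded from text search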

[08/50] hadoop git commit: YARN-3751. Fixed AppInfo to check if used resources are null. Contributed by Sunil G.

Posted by zj...@apache.org.
YARN-3751. Fixed AppInfo to check if used resources are null. Contributed by Sunil G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/91a3b9f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/91a3b9f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/91a3b9f0

Branch: refs/heads/YARN-2928
Commit: 91a3b9f0389610e31e243df0541f9e8cd8d5de87
Parents: dadcb31
Author: Zhijie Shen <zj...@apache.org>
Authored: Wed Jun 3 11:51:41 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:43:13 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                       |  3 +++
 .../apache/hadoop/yarn/server/webapp/dao/AppInfo.java | 14 ++++++++------
 2 files changed, 11 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/91a3b9f0/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index a19ba88..21618c7 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -570,6 +570,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3740. Fixed the typo in the configuration name:
     APPLICATION_HISTORY_PREFIX_MAX_APPS. (Xuan Gong via zjshen)
 
+    YARN-3751. Fixed AppInfo to check if used resources are null. (Sunil G via
+    zjshen)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/91a3b9f0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
index 0cc5f75..8f332a4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
@@ -86,12 +86,14 @@ public class AppInfo {
     elapsedTime = Times.elapsed(startedTime, finishedTime);
     finalAppStatus = app.getFinalApplicationStatus();
     if (app.getApplicationResourceUsageReport() != null) {
-      runningContainers =
-          app.getApplicationResourceUsageReport().getNumUsedContainers();
-      allocatedCpuVcores = app.getApplicationResourceUsageReport()
-          .getUsedResources().getVirtualCores();
-      allocatedMemoryMB = app.getApplicationResourceUsageReport()
-          .getUsedResources().getMemory();
+      runningContainers = app.getApplicationResourceUsageReport()
+          .getNumUsedContainers();
+      if (app.getApplicationResourceUsageReport().getUsedResources() != null) {
+        allocatedCpuVcores = app.getApplicationResourceUsageReport()
+            .getUsedResources().getVirtualCores();
+        allocatedMemoryMB = app.getApplicationResourceUsageReport()
+            .getUsedResources().getMemory();
+      }
     }
     progress = app.getProgress() * 100; // in percent
     if (app.getApplicationTags() != null && !app.getApplicationTags().isEmpty()) {

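An equivalent null-safe shape using locals, which also avoids the repeated
getter calls; a sketch only, the committed guard above is what ships:

    ApplicationResourceUsageReport usage =
        app.getApplicationResourceUsageReport();
    if (usage != null) {
      runningContainers = usage.getNumUsedContainers();
      Resource used = usage.getUsedResources();
      if (used != null) {
        allocatedCpuVcores = used.getVirtualCores();
        allocatedMemoryMB = used.getMemory();
      }
    }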

[39/50] hadoop git commit: HADOOP-12055. Deprecate usage of NativeIO#link. Contributed by Andrew Wang.

Posted by zj...@apache.org.
HADOOP-12055. Deprecate usage of NativeIO#link. Contributed by Andrew Wang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cd8bd6bc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cd8bd6bc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cd8bd6bc

Branch: refs/heads/YARN-2928
Commit: cd8bd6bc6fa4c1bfdab374f01ac769f4f0cdbc9e
Parents: f8153dd
Author: cnauroth <cn...@apache.org>
Authored: Sat Jun 6 09:17:03 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:57:00 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt          |  2 ++
 .../java/org/apache/hadoop/io/nativeio/NativeIO.java     | 11 +++++++++++
 .../apache/hadoop/hdfs/server/datanode/DataStorage.java  |  2 +-
 3 files changed, 14 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd8bd6bc/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 4b1d0d1..eacc3be 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -640,6 +640,8 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12056. Use DirectoryStream in DiskChecker#checkDirs to detect
     errors when listing a directory. (Zhihai Xu via wang)
 
+    HADOOP-12055. Deprecate usage of NativeIO#link. (Andrew Wang via cnauroth)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd8bd6bc/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
index bc6e62a..688b955 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
@@ -881,6 +881,17 @@ public class NativeIO {
     }
   }
 
+  /**
+   * Creates a hardlink "dst" that points to "src".
+   *
+   * This is deprecated since JDK7 NIO can create hardlinks via the
+   * {@link java.nio.file.Files} API.
+   *
+   * @param src source file
+   * @param dst hardlink location
+   * @throws IOException
+   */
+  @Deprecated
   public static void link(File src, File dst) throws IOException {
     if (!nativeLoaded) {
       HardLink.createHardLink(src, dst);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd8bd6bc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index 089e032..0bd08dd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -1044,7 +1044,7 @@ public class DataStorage extends Storage {
               idBasedLayoutSingleLinks.size());
           for (int j = iCopy; j < upperBound; j++) {
             LinkArgs cur = idBasedLayoutSingleLinks.get(j);
-            NativeIO.link(cur.src, cur.dst);
+            HardLink.createHardLink(cur.src, cur.dst);
           }
           return null;
         }

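The javadoc points at the JDK7 replacement; a minimal self-contained sketch
of the NIO equivalent (standard library only, not commit code):

    import java.io.File;
    import java.io.IOException;
    import java.nio.file.Files;

    class HardLinkSketch {
      /** Creates hardlink "dst" pointing at "src", like NativeIO#link. */
      static void link(File src, File dst) throws IOException {
        Files.createLink(dst.toPath(), src.toPath());
      }
    }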

[12/50] hadoop git commit: Revert "YARN-1462. Correct fix version from branch-2.7.1 to branch-2.8 in"

Posted by zj...@apache.org.
Revert "YARN-1462. Correct fix version from branch-2.7.1 to branch-2.8 in"

This reverts commit 0b5cfacde638bc25cc010cd9236369237b4e51a8.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89899fed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89899fed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89899fed

Branch: refs/heads/YARN-2928
Commit: 89899fed769a42cdb2164aa17e5881086095f2fb
Parents: b9e8f79
Author: Zhijie Shen <zj...@apache.org>
Authored: Wed Jun 3 14:15:31 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:43:14 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/89899fed/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index fb9badc..ce77941 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -377,9 +377,6 @@ Release 2.8.0 - UNRELEASED
     YARN-3467. Expose allocatedMB, allocatedVCores, and runningContainers metrics on 
     running Applications in RM Web UI. (Anubhav Dhoot via kasha)
 
-    YARN-1462. Made RM write application tags to timeline server and exposed them
-    to users via generic history web UI and REST API. (Xuan Gong via zjshen)
-
   OPTIMIZATIONS
 
     YARN-3339. TestDockerContainerExecutor should pull a single image and not
@@ -595,6 +592,9 @@ Release 2.7.1 - UNRELEASED
     YARN-3723. Need to clearly document primaryFilter and otherInfo value type.
     (Zhijie Shen via xgong)
 
+    YARN-1462. Made RM write application tags to timeline server and exposed them
+    to users via generic history web UI and REST API. (Xuan Gong via zjshen)
+
   OPTIMIZATIONS
 
   BUG FIXES


[43/50] hadoop git commit: YARN-3655. FairScheduler: potential livelock due to maxAMShare limitation and container reservation. (Zhihai Xu via kasha)

Posted by zj...@apache.org.
YARN-3655. FairScheduler: potential livelock due to maxAMShare limitation and container reservation. (Zhihai Xu via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1e2c3dec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1e2c3dec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1e2c3dec

Branch: refs/heads/YARN-2928
Commit: 1e2c3deccb39a2a73aa5792f69c15e8072f48399
Parents: ee73b53
Author: Karthik Kambatla <ka...@apache.org>
Authored: Sun Jun 7 11:37:52 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:57:01 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |   3 +
 .../scheduler/fair/FSAppAttempt.java            | 128 +++++----
 .../resourcemanager/scheduler/fair/FSQueue.java |  15 +
 .../scheduler/fair/FairScheduler.java           |  42 +--
 .../scheduler/fair/TestFairScheduler.java       | 282 +++++++++++++++++++
 5 files changed, 378 insertions(+), 92 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e2c3dec/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3643d0c..67a705c 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -589,6 +589,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3766. Fixed the apps table column error of generic history web UI.
     (Xuan Gong via zjshen)
 
+    YARN-3655. FairScheduler: potential livelock due to maxAMShare limitation
+    and container reservation. (Zhihai Xu via kasha)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e2c3dec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 6287deb..7419446 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -541,39 +541,37 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
       }
 
       return container.getResource();
-    } else {
-      if (!FairScheduler.fitsInMaxShare(getQueue(), capability)) {
-        return Resources.none();
-      }
+    }
 
-      // The desired container won't fit here, so reserve
-      reserve(request.getPriority(), node, container, reserved);
+    // The desired container won't fit here, so reserve
+    reserve(request.getPriority(), node, container, reserved);
 
-      return FairScheduler.CONTAINER_RESERVED;
-    }
+    return FairScheduler.CONTAINER_RESERVED;
   }
 
   private boolean hasNodeOrRackLocalRequests(Priority priority) {
     return getResourceRequests(priority).size() > 1;
   }
 
-  private Resource assignContainer(FSSchedulerNode node, boolean reserved) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Node offered to app: " + getName() + " reserved: " + reserved);
-    }
-
+  /**
+   * Whether the AM container for this app is over maxAMShare limit.
+   */
+  private boolean isOverAMShareLimit() {
     // Check the AM resource usage for the leaf queue
     if (!isAmRunning() && !getUnmanagedAM()) {
       List<ResourceRequest> ask = appSchedulingInfo.getAllResourceRequests();
       if (ask.isEmpty() || !getQueue().canRunAppAM(
           ask.get(0).getCapability())) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Skipping allocation because maxAMShare limit would " +
-              "be exceeded");
-        }
-        return Resources.none();
+        return true;
       }
     }
+    return false;
+  }
+
+  private Resource assignContainer(FSSchedulerNode node, boolean reserved) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Node offered to app: " + getName() + " reserved: " + reserved);
+    }
 
     Collection<Priority> prioritiesToTry = (reserved) ?
         Arrays.asList(node.getReservedContainer().getReservedPriority()) :
@@ -584,8 +582,9 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
     // (not scheduled) in order to promote better locality.
     synchronized (this) {
       for (Priority priority : prioritiesToTry) {
-        if (getTotalRequiredResources(priority) <= 0 ||
-            !hasContainerForNode(priority, node)) {
+        // Skip it for reserved container, since
+        // we already check it in isValidReservation.
+        if (!reserved && !hasContainerForNode(priority, node)) {
           continue;
         }
 
@@ -651,41 +650,10 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
   }
 
   /**
-   * Called when this application already has an existing reservation on the
-   * given node.  Sees whether we can turn the reservation into an allocation.
-   * Also checks whether the application needs the reservation anymore, and
-   * releases it if not.
-   *
-   * @param node
-   *     Node that the application has an existing reservation on
-   */
-  public Resource assignReservedContainer(FSSchedulerNode node) {
-    RMContainer rmContainer = node.getReservedContainer();
-    Priority priority = rmContainer.getReservedPriority();
-
-    // Make sure the application still needs requests at this priority
-    if (getTotalRequiredResources(priority) == 0) {
-      unreserve(priority, node);
-      return Resources.none();
-    }
-
-    // Fail early if the reserved container won't fit.
-    // Note that we have an assumption here that there's only one container size
-    // per priority.
-    if (!Resources.fitsIn(node.getReservedContainer().getReservedResource(),
-        node.getAvailableResource())) {
-      return Resources.none();
-    }
-
-    return assignContainer(node, true);
-  }
-
-
-  /**
    * Whether this app has containers requests that could be satisfied on the
    * given node, if the node had full space.
    */
-  public boolean hasContainerForNode(Priority prio, FSSchedulerNode node) {
+  private boolean hasContainerForNode(Priority prio, FSSchedulerNode node) {
     ResourceRequest anyRequest = getResourceRequest(prio, ResourceRequest.ANY);
     ResourceRequest rackRequest = getResourceRequest(prio, node.getRackName());
     ResourceRequest nodeRequest = getResourceRequest(prio, node.getNodeName());
@@ -703,9 +671,56 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
                 (nodeRequest != null && nodeRequest.getNumContainers() > 0)) &&
             // The requested container must be able to fit on the node:
             Resources.lessThanOrEqual(RESOURCE_CALCULATOR, null,
-                anyRequest.getCapability(), node.getRMNode().getTotalCapability());
+                anyRequest.getCapability(),
+                node.getRMNode().getTotalCapability()) &&
+            // The requested container must fit in queue maximum share:
+            getQueue().fitsInMaxShare(anyRequest.getCapability());
   }
 
+  private boolean isValidReservation(FSSchedulerNode node) {
+    Priority reservedPriority = node.getReservedContainer().
+        getReservedPriority();
+    return hasContainerForNode(reservedPriority, node) &&
+        !isOverAMShareLimit();
+  }
+
+  /**
+   * Called when this application already has an existing reservation on the
+   * given node.  Sees whether we can turn the reservation into an allocation.
+   * Also checks whether the application needs the reservation anymore, and
+   * releases it if not.
+   *
+   * @param node
+   *     Node that the application has an existing reservation on
+   * @return whether the reservation on the given node is valid.
+   */
+  public boolean assignReservedContainer(FSSchedulerNode node) {
+    RMContainer rmContainer = node.getReservedContainer();
+    Priority reservedPriority = rmContainer.getReservedPriority();
+
+    if (!isValidReservation(node)) {
+      // Don't hold the reservation if app can no longer use it
+      LOG.info("Releasing reservation that cannot be satisfied for " +
+          "application " + getApplicationAttemptId() + " on node " + node);
+      unreserve(reservedPriority, node);
+      return false;
+    }
+
+    // Reservation valid; try to fulfill the reservation
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Trying to fulfill reservation for application "
+          + getApplicationAttemptId() + " on node: " + node);
+    }
+
+    // Fail early if the reserved container won't fit.
+    // Note that we have an assumption here that
+    // there's only one container size per priority.
+    if (Resources.fitsIn(node.getReservedContainer().getReservedResource(),
+        node.getAvailableResource())) {
+      assignContainer(node, true);
+    }
+    return true;
+  }
 
   static class RMContainerComparator implements Comparator<RMContainer>,
       Serializable {
@@ -795,6 +810,13 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
 
   @Override
   public Resource assignContainer(FSSchedulerNode node) {
+    if (isOverAMShareLimit()) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Skipping allocation because maxAMShare limit would " +
+            "be exceeded");
+      }
+      return Resources.none();
+    }
     return assignContainer(node, false);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e2c3dec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
index ade2880..e488c76 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
@@ -330,4 +330,19 @@ public abstract class FSQueue implements Queue, Schedulable {
   @Override
   public void decPendingResource(String nodeLabel, Resource resourceToDec) {
   }
+
+  public boolean fitsInMaxShare(Resource additionalResource) {
+    Resource usagePlusAddition =
+        Resources.add(getResourceUsage(), additionalResource);
+
+    if (!Resources.fitsIn(usagePlusAddition, getMaxShare())) {
+      return false;
+    }
+
+    FSQueue parentQueue = getParent();
+    if (parentQueue != null) {
+      return parentQueue.fitsInMaxShare(additionalResource);
+    }
+    return true;
+  }
 }

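The new FSQueue#fitsInMaxShare above is a recursive check: a request fits only if current usage plus the addition stays within the max share of the queue and of every ancestor up to the root. A minimal standalone sketch of that pattern (SimpleQueue and the long-valued "resources" are illustrative stand-ins for the real FSQueue/Resource types):

  class SimpleQueue {
    final SimpleQueue parent;  // null for the root queue
    final long maxShare;       // configured ceiling for this queue
    long usage;                // current resource usage

    SimpleQueue(SimpleQueue parent, long maxShare) {
      this.parent = parent;
      this.maxShare = maxShare;
    }

    boolean fitsInMaxShare(long additional) {
      if (usage + additional > maxShare) {
        return false;  // over this queue's own ceiling
      }
      // Must also fit within every ancestor's ceiling.
      return parent == null || parent.fitsInMaxShare(additional);
    }
  }

Moving the method from a static helper on FairScheduler onto FSQueue lets FSAppAttempt#hasContainerForNode fold the max-share check into the same place that already validates locality and node capacity, as the FairScheduler hunk below shows.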
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e2c3dec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 64b3f12..2ed3b2a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
-import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
@@ -1075,31 +1074,12 @@ public class FairScheduler extends
     // 1. Check for reserved applications
     // 2. Schedule if there are no reservations
 
+    boolean validReservation = false;
     FSAppAttempt reservedAppSchedulable = node.getReservedAppSchedulable();
     if (reservedAppSchedulable != null) {
-      Priority reservedPriority = node.getReservedContainer().getReservedPriority();
-      FSQueue queue = reservedAppSchedulable.getQueue();
-
-      if (!reservedAppSchedulable.hasContainerForNode(reservedPriority, node)
-          || !fitsInMaxShare(queue,
-          node.getReservedContainer().getReservedResource())) {
-        // Don't hold the reservation if app can no longer use it
-        LOG.info("Releasing reservation that cannot be satisfied for application "
-            + reservedAppSchedulable.getApplicationAttemptId()
-            + " on node " + node);
-        reservedAppSchedulable.unreserve(reservedPriority, node);
-        reservedAppSchedulable = null;
-      } else {
-        // Reservation exists; try to fulfill the reservation
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Trying to fulfill reservation for application "
-              + reservedAppSchedulable.getApplicationAttemptId()
-              + " on node: " + node);
-        }
-        node.getReservedAppSchedulable().assignReservedContainer(node);
-      }
+      validReservation = reservedAppSchedulable.assignReservedContainer(node);
     }
-    if (reservedAppSchedulable == null) {
+    if (!validReservation) {
       // No reservation, schedule at queue which is farthest below fair share
       int assignedContainers = 0;
       while (node.getReservedContainer() == null) {
@@ -1117,22 +1097,6 @@ public class FairScheduler extends
     updateRootQueueMetrics();
   }
 
-  static boolean fitsInMaxShare(FSQueue queue, Resource
-      additionalResource) {
-    Resource usagePlusAddition =
-        Resources.add(queue.getResourceUsage(), additionalResource);
-
-    if (!Resources.fitsIn(usagePlusAddition, queue.getMaxShare())) {
-      return false;
-    }
-    
-    FSQueue parentQueue = queue.getParent();
-    if (parentQueue != null) {
-      return fitsInMaxShare(parentQueue, additionalResource);
-    }
-    return true;
-  }
-
   public FSAppAttempt getSchedulerApp(ApplicationAttemptId appAttemptId) {
     return super.getApplicationAttempt(appAttemptId);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e2c3dec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 94fdc1a..56e8adc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -3701,6 +3701,288 @@ public class TestFairScheduler extends FairSchedulerTestBase {
         0, queue2.getAmResourceUsage().getMemory());
   }
 
+  /**
+   * The test verifies container gets reserved when not over maxAMShare,
+   * reserved container gets unreserved when over maxAMShare,
+   * container doesn't get reserved when over maxAMShare,
+   * reserved container is turned into an allocation and
+   * superfluously reserved container gets unreserved.
+   * 1. create three nodes: Node1 is 10G, Node2 is 10G and Node3 is 5G.
+   * 2. APP1 allocated 1G on Node1 and APP2 allocated 1G on Node2.
+   * 3. APP3 reserved 10G on Node1 and Node2.
+   * 4. APP4 allocated 5G on Node3, which makes APP3 over maxAMShare.
+   * 5. Remove APP1 to make Node1 have 10G available resource.
+   * 6. APP3 unreserved its container on Node1 because it is over maxAMShare.
+   * 7. APP5 allocated 1G on Node1 after APP3 unreserved its container.
+   * 8. Remove APP3.
+   * 9. APP6 failed to reserve a 10G container on Node1 due to AMShare limit.
+   * 10. APP7 allocated 1G on Node1.
+   * 11. Remove APP4 and APP5.
+   * 12. APP6 reserved 10G on Node1 and Node2.
+   * 13. APP8 failed to allocate a 1G container on Node1 and Node2 because
+   *     APP6 reserved Node1 and Node2.
+   * 14. Remove APP2.
+   * 15. APP6 turned the 10G reservation into an allocation on node2.
+   * 16. APP6 unreserved its container on node1, APP8 allocated 1G on Node1.
+   */
+  @Test
+  public void testQueueMaxAMShareWithContainerReservation() throws Exception {
+    conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
+    PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
+    out.println("<?xml version=\"1.0\"?>");
+    out.println("<allocations>");
+    out.println("<queue name=\"queue1\">");
+    out.println("<maxAMShare>0.5</maxAMShare>");
+    out.println("</queue>");
+    out.println("</allocations>");
+    out.close();
+
+    scheduler.init(conf);
+    scheduler.start();
+    scheduler.reinitialize(conf, resourceManager.getRMContext());
+
+    RMNode node1 =
+        MockNodes.newNodeInfo(1, Resources.createResource(10240, 10),
+            1, "127.0.0.1");
+    RMNode node2 =
+        MockNodes.newNodeInfo(1, Resources.createResource(10240, 10),
+            2, "127.0.0.2");
+    RMNode node3 =
+        MockNodes.newNodeInfo(1, Resources.createResource(5120, 5),
+            3, "127.0.0.3");
+    NodeAddedSchedulerEvent nodeE1 = new NodeAddedSchedulerEvent(node1);
+    NodeUpdateSchedulerEvent updateE1 = new NodeUpdateSchedulerEvent(node1);
+    NodeAddedSchedulerEvent nodeE2 = new NodeAddedSchedulerEvent(node2);
+    NodeUpdateSchedulerEvent updateE2 = new NodeUpdateSchedulerEvent(node2);
+    NodeAddedSchedulerEvent nodeE3 = new NodeAddedSchedulerEvent(node3);
+    NodeUpdateSchedulerEvent updateE3 = new NodeUpdateSchedulerEvent(node3);
+    scheduler.handle(nodeE1);
+    scheduler.handle(nodeE2);
+    scheduler.handle(nodeE3);
+    scheduler.update();
+    FSLeafQueue queue1 = scheduler.getQueueManager().getLeafQueue("queue1",
+        true);
+    Resource amResource1 = Resource.newInstance(1024, 1);
+    Resource amResource2 = Resource.newInstance(1024, 1);
+    Resource amResource3 = Resource.newInstance(10240, 1);
+    Resource amResource4 = Resource.newInstance(5120, 1);
+    Resource amResource5 = Resource.newInstance(1024, 1);
+    Resource amResource6 = Resource.newInstance(10240, 1);
+    Resource amResource7 = Resource.newInstance(1024, 1);
+    Resource amResource8 = Resource.newInstance(1024, 1);
+    int amPriority = RMAppAttemptImpl.AM_CONTAINER_PRIORITY.getPriority();
+    ApplicationAttemptId attId1 = createAppAttemptId(1, 1);
+    createApplicationWithAMResource(attId1, "queue1", "user1", amResource1);
+    createSchedulingRequestExistingApplication(1024, 1, amPriority, attId1);
+    FSAppAttempt app1 = scheduler.getSchedulerApp(attId1);
+    scheduler.update();
+    // Allocate app1's AM container on node1.
+    scheduler.handle(updateE1);
+    assertEquals("Application1's AM requests 1024 MB memory",
+        1024, app1.getAMResource().getMemory());
+    assertEquals("Application1's AM should be running",
+        1, app1.getLiveContainers().size());
+    assertEquals("Queue1's AM resource usage should be 1024 MB memory",
+        1024, queue1.getAmResourceUsage().getMemory());
+
+    ApplicationAttemptId attId2 = createAppAttemptId(2, 1);
+    createApplicationWithAMResource(attId2, "queue1", "user1", amResource2);
+    createSchedulingRequestExistingApplication(1024, 1, amPriority, attId2);
+    FSAppAttempt app2 = scheduler.getSchedulerApp(attId2);
+    scheduler.update();
+    // Allocate app2's AM container on node2.
+    scheduler.handle(updateE2);
+    assertEquals("Application2's AM requests 1024 MB memory",
+        1024, app2.getAMResource().getMemory());
+    assertEquals("Application2's AM should be running",
+        1, app2.getLiveContainers().size());
+    assertEquals("Queue1's AM resource usage should be 2048 MB memory",
+        2048, queue1.getAmResourceUsage().getMemory());
+
+    ApplicationAttemptId attId3 = createAppAttemptId(3, 1);
+    createApplicationWithAMResource(attId3, "queue1", "user1", amResource3);
+    createSchedulingRequestExistingApplication(10240, 1, amPriority, attId3);
+    FSAppAttempt app3 = scheduler.getSchedulerApp(attId3);
+    scheduler.update();
+    // app3 reserves a container on node1 because node1's available resource
+    // is less than app3's AM container resource.
+    scheduler.handle(updateE1);
+    // Similarly app3 reserves a container on node2.
+    scheduler.handle(updateE2);
+    assertEquals("Application3's AM resource shouldn't be updated",
+        0, app3.getAMResource().getMemory());
+    assertEquals("Application3's AM should not be running",
+        0, app3.getLiveContainers().size());
+    assertEquals("Queue1's AM resource usage should be 2048 MB memory",
+        2048, queue1.getAmResourceUsage().getMemory());
+
+    ApplicationAttemptId attId4 = createAppAttemptId(4, 1);
+    createApplicationWithAMResource(attId4, "queue1", "user1", amResource4);
+    createSchedulingRequestExistingApplication(5120, 1, amPriority, attId4);
+    FSAppAttempt app4 = scheduler.getSchedulerApp(attId4);
+    scheduler.update();
+    // app4 can't allocate its AM container on node1 because
+    // app3 already reserved its container on node1.
+    scheduler.handle(updateE1);
+    assertEquals("Application4's AM resource shouldn't be updated",
+        0, app4.getAMResource().getMemory());
+    assertEquals("Application4's AM should not be running",
+        0, app4.getLiveContainers().size());
+    assertEquals("Queue1's AM resource usage should be 2048 MB memory",
+        2048, queue1.getAmResourceUsage().getMemory());
+
+    scheduler.update();
+    // Allocate app4's AM container on node3.
+    scheduler.handle(updateE3);
+    assertEquals("Application4's AM requests 5120 MB memory",
+        5120, app4.getAMResource().getMemory());
+    assertEquals("Application4's AM should be running",
+        1, app4.getLiveContainers().size());
+    assertEquals("Queue1's AM resource usage should be 7168 MB memory",
+        7168, queue1.getAmResourceUsage().getMemory());
+
+    AppAttemptRemovedSchedulerEvent appRemovedEvent1 =
+        new AppAttemptRemovedSchedulerEvent(attId1,
+            RMAppAttemptState.FINISHED, false);
+    // Release app1's AM container on node1.
+    scheduler.handle(appRemovedEvent1);
+    assertEquals("Queue1's AM resource usage should be 6144 MB memory",
+        6144, queue1.getAmResourceUsage().getMemory());
+
+    ApplicationAttemptId attId5 = createAppAttemptId(5, 1);
+    createApplicationWithAMResource(attId5, "queue1", "user1", amResource5);
+    createSchedulingRequestExistingApplication(1024, 1, amPriority, attId5);
+    FSAppAttempt app5 = scheduler.getSchedulerApp(attId5);
+    scheduler.update();
+    // app5 can allocate its AM container on node1 after
+    // app3 unreserve its container on node1 due to
+    // exceeding queue MaxAMShare limit.
+    scheduler.handle(updateE1);
+    assertEquals("Application5's AM requests 1024 MB memory",
+        1024, app5.getAMResource().getMemory());
+    assertEquals("Application5's AM should be running",
+        1, app5.getLiveContainers().size());
+    assertEquals("Queue1's AM resource usage should be 7168 MB memory",
+        7168, queue1.getAmResourceUsage().getMemory());
+
+    AppAttemptRemovedSchedulerEvent appRemovedEvent3 =
+        new AppAttemptRemovedSchedulerEvent(attId3,
+            RMAppAttemptState.FINISHED, false);
+    // Remove app3.
+    scheduler.handle(appRemovedEvent3);
+    assertEquals("Queue1's AM resource usage should be 7168 MB memory",
+        7168, queue1.getAmResourceUsage().getMemory());
+
+    ApplicationAttemptId attId6 = createAppAttemptId(6, 1);
+    createApplicationWithAMResource(attId6, "queue1", "user1", amResource6);
+    createSchedulingRequestExistingApplication(10240, 1, amPriority, attId6);
+    FSAppAttempt app6 = scheduler.getSchedulerApp(attId6);
+    scheduler.update();
+    // app6 can't reserve a container on node1 because
+    // it exceeds queue MaxAMShare limit.
+    scheduler.handle(updateE1);
+    assertEquals("Application6's AM resource shouldn't be updated",
+        0, app6.getAMResource().getMemory());
+    assertEquals("Application6's AM should not be running",
+        0, app6.getLiveContainers().size());
+    assertEquals("Queue1's AM resource usage should be 7168 MB memory",
+        7168, queue1.getAmResourceUsage().getMemory());
+
+    ApplicationAttemptId attId7 = createAppAttemptId(7, 1);
+    createApplicationWithAMResource(attId7, "queue1", "user1", amResource7);
+    createSchedulingRequestExistingApplication(1024, 1, amPriority, attId7);
+    FSAppAttempt app7 = scheduler.getSchedulerApp(attId7);
+    scheduler.update();
+    // Allocate app7's AM container on node1 to prove
+    // app6 didn't reserve a container on node1.
+    scheduler.handle(updateE1);
+    assertEquals("Application7's AM requests 1024 MB memory",
+        1024, app7.getAMResource().getMemory());
+    assertEquals("Application7's AM should be running",
+        1, app7.getLiveContainers().size());
+    assertEquals("Queue1's AM resource usage should be 8192 MB memory",
+        8192, queue1.getAmResourceUsage().getMemory());
+
+    AppAttemptRemovedSchedulerEvent appRemovedEvent4 =
+        new AppAttemptRemovedSchedulerEvent(attId4,
+            RMAppAttemptState.FINISHED, false);
+    // Release app4's AM container on node3.
+    scheduler.handle(appRemovedEvent4);
+    assertEquals("Queue1's AM resource usage should be 3072 MB memory",
+        3072, queue1.getAmResourceUsage().getMemory());
+
+    AppAttemptRemovedSchedulerEvent appRemovedEvent5 =
+        new AppAttemptRemovedSchedulerEvent(attId5,
+            RMAppAttemptState.FINISHED, false);
+    // Release app5's AM container on node1.
+    scheduler.handle(appRemovedEvent5);
+    assertEquals("Queue1's AM resource usage should be 2048 MB memory",
+              2048, queue1.getAmResourceUsage().getMemory());
+
+    scheduler.update();
+    // app6 reserves a container on node1 because node1's available resource
+    // is less than app6's AM container resource and
+    // app6 is not over AMShare limit.
+    scheduler.handle(updateE1);
+    // Similarly app6 reserves a container on node2.
+    scheduler.handle(updateE2);
+
+    ApplicationAttemptId attId8 = createAppAttemptId(8, 1);
+    createApplicationWithAMResource(attId8, "queue1", "user1", amResource8);
+    createSchedulingRequestExistingApplication(1024, 1, amPriority, attId8);
+    FSAppAttempt app8 = scheduler.getSchedulerApp(attId8);
+    scheduler.update();
+    // app8 can't allocate a container on node1 because
+    // app6 already reserved a container on node1.
+    scheduler.handle(updateE1);
+    assertEquals("Application8's AM resource shouldn't be updated",
+        0, app8.getAMResource().getMemory());
+    assertEquals("Application8's AM should not be running",
+        0, app8.getLiveContainers().size());
+    assertEquals("Queue1's AM resource usage should be 2048 MB memory",
+        2048, queue1.getAmResourceUsage().getMemory());
+    scheduler.update();
+    // app8 can't allocate a container on node2 because
+    // app6 already reserved a container on node2.
+    scheduler.handle(updateE2);
+    assertEquals("Application8's AM resource shouldn't be updated",
+        0, app8.getAMResource().getMemory());
+    assertEquals("Application8's AM should not be running",
+        0, app8.getLiveContainers().size());
+    assertEquals("Queue1's AM resource usage should be 2048 MB memory",
+        2048, queue1.getAmResourceUsage().getMemory());
+
+    AppAttemptRemovedSchedulerEvent appRemovedEvent2 =
+        new AppAttemptRemovedSchedulerEvent(attId2,
+            RMAppAttemptState.FINISHED, false);
+    // Release app2's AM container on node2.
+    scheduler.handle(appRemovedEvent2);
+    assertEquals("Queue1's AM resource usage should be 1024 MB memory",
+        1024, queue1.getAmResourceUsage().getMemory());
+
+    scheduler.update();
+    // app6 turns the reservation into an allocation on node2.
+    scheduler.handle(updateE2);
+    assertEquals("Application6's AM requests 10240 MB memory",
+        10240, app6.getAMResource().getMemory());
+    assertEquals("Application6's AM should be running",
+        1, app6.getLiveContainers().size());
+    assertEquals("Queue1's AM resource usage should be 11264 MB memory",
+        11264, queue1.getAmResourceUsage().getMemory());
+
+    scheduler.update();
+    // app6 unreserve its container on node1 because
+    // it already got a container on node2.
+    // Now app8 can allocate its AM container on node1.
+    scheduler.handle(updateE1);
+    assertEquals("Application8's AM requests 1024 MB memory",
+        1024, app8.getAMResource().getMemory());
+    assertEquals("Application8's AM should be running",
+        1, app8.getLiveContainers().size());
+    assertEquals("Queue1's AM resource usage should be 12288 MB memory",
+        12288, queue1.getAmResourceUsage().getMemory());
+  }
+
   @Test
   public void testMaxRunningAppsHierarchicalQueues() throws Exception {
     conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);


[18/50] hadoop git commit: MAPREDUCE-6382. Don't escape HTML links in Diagnostics in JHS job overview. (Siqi Li via gera)

Posted by zj...@apache.org.
MAPREDUCE-6382. Don't escape HTML links in Diagnostics in JHS job overview. (Siqi Li via gera)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8732f97c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8732f97c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8732f97c

Branch: refs/heads/YARN-2928
Commit: 8732f97cab213d54c5aa1d0e1ecfedbe758c2e33
Parents: df96753
Author: Gera Shegalov <ge...@apache.org>
Authored: Wed Jun 3 23:41:07 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:43:16 2015 -0700

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt                              | 3 +++
 .../java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8732f97c/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 9fa6c5a..391303e 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -471,6 +471,9 @@ Release 2.8.0 - UNRELEASED
     high. Error is: "error=7, Argument list too long at if number of input
     file is high" (wilfreds via rkanter)
 
+    MAPREDUCE-6382. Don't escape HTML links in Diagnostics in JHS job overview.
+    (Siqi Li via gera)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8732f97c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
index f3341a6..dbd1dee 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
@@ -101,7 +101,7 @@ public class HsJobBlock extends HtmlBlock {
       for(String diag: diagnostics) {
         b.append(addTaskLinks(diag));
       }
-      infoBlock._("Diagnostics:", b.toString());
+      infoBlock._r("Diagnostics:", b.toString());
     }
 
     if(job.getNumMaps() > 0) {


[41/50] hadoop git commit: HDFS-8522. Change heavily recorded NN logs from INFO to DEBUG level. (Contributed by Xiaoyu Yao)

Posted by zj...@apache.org.
HDFS-8522. Change heavily recorded NN logs from INFO to DEBUG level. (Contributed by Xiaoyu Yao)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/33c03026
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/33c03026
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/33c03026

Branch: refs/heads/YARN-2928
Commit: 33c030260a792dec417c2ae7d57a59b2fa7ec4c3
Parents: 01cd698
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Fri Jun 5 15:09:06 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:57:00 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  2 +
 .../hdfs/server/namenode/FSNamesystem.java      | 76 +++++++++-----------
 2 files changed, 35 insertions(+), 43 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/33c03026/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 48d8eb3..72ab17b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -957,6 +957,8 @@ Release 2.7.1 - UNRELEASED
     HDFS-8523. Remove usage information on unsupported operation
     "fsck -showprogress" from branch-2 (J.Andreina via vinayakumarb)
 
+    HDFS-8522. Change heavily recorded NN logs from INFO to DEBUG level. (xyao)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33c03026/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 5ed069d..dfbf04e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1956,10 +1956,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
                       long mtime)
       throws IOException, UnresolvedLinkException {
     String src = srcArg;
-    if (NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* NameSystem.truncate: src="
-          + src + " newLength=" + newLength);
-    }
+    NameNode.stateChangeLog.debug(
+        "DIR* NameSystem.truncate: src={} newLength={}", src, newLength);
     if (newLength < 0) {
       throw new HadoopIllegalArgumentException(
           "Cannot truncate to a negative file size: " + newLength + ".");
@@ -2108,10 +2106,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       file.setLastBlock(truncatedBlockUC, blockManager.getStorages(oldBlock));
       getBlockManager().addBlockCollection(truncatedBlockUC, file);
 
-      NameNode.stateChangeLog.info("BLOCK* prepareFileForTruncate: "
-          + "Scheduling copy-on-truncate to new size "
-          + truncatedBlockUC.getNumBytes() + " new block " + newBlock
-          + " old block " + truncatedBlockUC.getTruncateBlock());
+      NameNode.stateChangeLog.debug(
+          "BLOCK* prepareFileForTruncate: Scheduling copy-on-truncate to new" +
+          " size {}  new block {} old block {}", truncatedBlockUC.getNumBytes(),
+          newBlock, truncatedBlockUC.getTruncateBlock());
     } else {
       // Use new generation stamp for in-place truncate recovery
       blockManager.convertLastBlockToUnderConstruction(file, lastBlockDelta);
@@ -2124,10 +2122,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       truncatedBlockUC.getTruncateBlock().setGenerationStamp(
           newBlock.getGenerationStamp());
 
-      NameNode.stateChangeLog.debug("BLOCK* prepareFileForTruncate: "
-          + "Scheduling in-place block truncate to new size "
-          + truncatedBlockUC.getTruncateBlock().getNumBytes()
-          + " block=" + truncatedBlockUC);
+      NameNode.stateChangeLog.debug(
+          "BLOCK* prepareFileForTruncate: {} Scheduling in-place block " +
+          "truncate to new size {}",
+          truncatedBlockUC.getTruncateBlock().getNumBytes(), truncatedBlockUC);
     }
     if (shouldRecoverNow) {
       truncatedBlockUC.initializeBlockRecovery(newBlock.getGenerationStamp());
@@ -2774,11 +2772,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       String clientMachine, boolean newBlock, boolean logRetryCache)
       throws IOException {
     String src = srcArg;
-    if (NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* NameSystem.appendFile: src=" + src
-          + ", holder=" + holder
-          + ", clientMachine=" + clientMachine);
-    }
+    NameNode.stateChangeLog.debug(
+        "DIR* NameSystem.appendFile: src={}, holder={}, clientMachine={}",
+        src, holder, clientMachine);
     boolean skipSync = false;
     LocatedBlock lb = null;
     HdfsFileStatus stat = null;
@@ -2806,12 +2802,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       }
     }
     if (lb != null) {
-      if (NameNode.stateChangeLog.isDebugEnabled()) {
-        NameNode.stateChangeLog.debug("DIR* NameSystem.appendFile: file "
-            +src+" for "+holder+" at "+clientMachine
-            +" block " + lb.getBlock()
-            +" block size " + lb.getBlock().getNumBytes());
-      }
+      NameNode.stateChangeLog.debug(
+          "DIR* NameSystem.appendFile: file {} for {} at {} block {} block" +
+          " size {}", src, holder, clientMachine, lb.getBlock(),
+          lb.getBlock().getNumBytes());
     }
     logAuditEvent(true, "append", srcArg);
     return new LastBlockWithStatus(lb, stat);
@@ -2840,10 +2834,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   LocatedBlock getAdditionalBlock(
       String src, long fileId, String clientName, ExtendedBlock previous,
       DatanodeInfo[] excludedNodes, String[] favoredNodes) throws IOException {
-    if(NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("BLOCK* getAdditionalBlock: "
-          + src + " inodeId " +  fileId  + " for " + clientName);
-    }
+    NameNode.stateChangeLog.debug("BLOCK* getAdditionalBlock: {}  inodeId {}" +
+        " for {}", src, fileId, clientName);
 
     waitForLoadingFSImage();
     LocatedBlock[] onRetryBlock = new LocatedBlock[1];
@@ -2950,10 +2942,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    */
   void abandonBlock(ExtendedBlock b, long fileId, String src, String holder)
       throws IOException {
-    if(NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("BLOCK* NameSystem.abandonBlock: " + b
-          + "of file " + src);
-    }
+    NameNode.stateChangeLog.debug(
+        "BLOCK* NameSystem.abandonBlock: {} of file {}", b, src);
     waitForLoadingFSImage();
     checkOperation(OperationCategory.WRITE);
     FSPermissionChecker pc = getPermissionChecker();
@@ -2962,10 +2952,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot abandon block " + b + " for file" + src);
       FSDirWriteFileOp.abandonBlock(dir, pc, b, fileId, src, holder);
-      if(NameNode.stateChangeLog.isDebugEnabled()) {
-        NameNode.stateChangeLog.debug("BLOCK* NameSystem.abandonBlock: "
-                                      + b + " is removed from pendingCreates");
-      }
+      NameNode.stateChangeLog.debug("BLOCK* NameSystem.abandonBlock: {} is " +
+          "removed from pendingCreates", b);
     } finally {
       writeUnlock();
     }
@@ -2973,7 +2961,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   }
 
   INodeFile checkLease(
-      String src, String holder, INode inode, long fileId) throws LeaseExpiredException, FileNotFoundException {
+      String src, String holder, INode inode, long fileId)
+      throws LeaseExpiredException, FileNotFoundException {
     assert hasReadLock();
     final String ident = src + " (inode " + fileId + ")";
     if (inode == null) {
@@ -4039,11 +4028,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     waitForLoadingFSImage();
     // file is closed
     getEditLog().logCloseFile(path, file);
-    if (NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("closeFile: "
-              +path+" with "+ file.getBlocks().length
-              +" blocks is persisted to the file system");
-    }
+    NameNode.stateChangeLog.debug("closeFile: {} with {} blocks is persisted" +
+        " to the file system", path, file.getBlocks().length);
   }
 
   /**
@@ -5903,7 +5889,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       if (cookieTab[0] == null) {
         cookieTab[0] = String.valueOf(getIntCookie(cookieTab[0]));
       }
-      LOG.info("there are no corrupt file blocks.");
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("there are no corrupt file blocks.");
+      }
       return corruptFiles;
     }
 
@@ -5938,7 +5926,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         }
       }
       cookieTab[0] = String.valueOf(skip);
-      LOG.info("list corrupt file blocks returned: " + count);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("list corrupt file blocks returned: " + count);
+      }
       return corruptFiles;
     } finally {
       readUnlock();

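The rewrite above leans on SLF4J-style parameterized logging: with "{}" placeholders the message is only formatted after the level check inside debug(), so the explicit isDebugEnabled() guard becomes redundant. The guard is still worthwhile when the call site builds its argument eagerly, which is why the listCorruptFileBlocks hunks add one around string concatenation. A minimal sketch of both cases (logger and method names here are illustrative, not from the patch):

  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  public class ParameterizedLoggingSketch {
    private static final Logger LOG =
        LoggerFactory.getLogger(ParameterizedLoggingSketch.class);

    void onTruncate(String src, long newLength) {
      // No guard needed: "{}" defers formatting until the logger has
      // confirmed that DEBUG is enabled.
      LOG.debug("truncate: src={} newLength={}", src, newLength);
    }

    void onListCorrupt(int count) {
      // A guard still pays off when the argument is assembled eagerly,
      // e.g. by string concatenation.
      if (LOG.isDebugEnabled()) {
        LOG.debug("list corrupt file blocks returned: " + count);
      }
    }
  }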

[35/50] hadoop git commit: MAPREDUCE-6383. Pi job (QuasiMonteCarlo) should not try to read the results file if its job fails. Contributed by Harsh J.

Posted by zj...@apache.org.
MAPREDUCE-6383. Pi job (QuasiMonteCarlo) should not try to read the
results file if its job fails. Contributed by Harsh J.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a0962cdc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a0962cdc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a0962cdc

Branch: refs/heads/YARN-2928
Commit: a0962cdcc6c31d6bb025dc82d733ea75ae5ed3d8
Parents: c8f7f17
Author: Devaraj K <de...@apache.org>
Authored: Fri Jun 5 21:22:47 2015 +0530
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:56:59 2015 -0700

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt                             | 3 +++
 .../main/java/org/apache/hadoop/examples/QuasiMonteCarlo.java    | 4 ++++
 2 files changed, 7 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0962cdc/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index d78fb9c..12e3a3f 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -361,6 +361,9 @@ Release 2.8.0 - UNRELEASED
     MAPREDUCE-6174. Combine common stream code into parent class for
     InMemoryMapOutput and OnDiskMapOutput. (Eric Payne via gera)
 
+    MAPREDUCE-6383. Pi job (QuasiMonteCarlo) should not try to read the 
+    results file if its job fails. (Harsh J via devaraj)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0962cdc/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/QuasiMonteCarlo.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/QuasiMonteCarlo.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/QuasiMonteCarlo.java
index 25dee6b..1a0c372 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/QuasiMonteCarlo.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/QuasiMonteCarlo.java
@@ -304,6 +304,10 @@ public class QuasiMonteCarlo extends Configured implements Tool {
       System.out.println("Starting Job");
       final long startTime = System.currentTimeMillis();
       job.waitForCompletion(true);
+      if (!job.isSuccessful()) {
+        System.out.println("Job " + job.getJobID() + " failed!");
+        System.exit(1);
+      }
       final double duration = (System.currentTimeMillis() - startTime)/1000.0;
       System.out.println("Job Finished in " + duration + " seconds");
 

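As a side note, Job#waitForCompletion(boolean) already returns whether the job succeeded, so the same fail-fast behavior can hang off the return value instead of a separate isSuccessful() call. A hedged sketch (the driver class and job setup are illustrative, not from this patch):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.mapreduce.Job;

  public class FailFastDriverSketch {
    public static void main(String[] args) throws Exception {
      Job job = Job.getInstance(new Configuration(), "pi-sketch");
      // waitForCompletion(true) submits the job, prints progress, and
      // returns true only if the job succeeded.
      if (!job.waitForCompletion(true)) {
        System.out.println("Job " + job.getJobID() + " failed!");
        System.exit(1);
      }
      // Only reached on success; safe to read the results file here.
    }
  }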

[46/50] hadoop git commit: HDFS-8116. Cleanup unnecessary if LOG.isDebugEnabled() from RollingWindowManager. Contributed by Brahma Reddy Battula.

Posted by zj...@apache.org.
HDFS-8116. Cleanup unnecessary if LOG.isDebugEnabled() from RollingWindowManager. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee73b535
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee73b535
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee73b535

Branch: refs/heads/YARN-2928
Commit: ee73b535dd0ae25f43c34b95e5565f293fde1b19
Parents: bcf4319
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Sat Jun 6 18:47:45 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:57:01 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                  | 3 +++
 .../server/namenode/top/window/RollingWindowManager.java     | 8 +++-----
 2 files changed, 6 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee73b535/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f7f7f98..21f587f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -606,6 +606,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8432. Introduce a minimum compatible layout version to allow downgrade
     in more rolling upgrade use cases. (cnauroth)
 
+    HDFS-8116. Cleanup uncessary if LOG.isDebugEnabled() from
+    RollingWindowManager. (Brahma Reddy Battula via xyao)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee73b535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
index 4759cc8..63438ff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
@@ -187,11 +187,9 @@ public class RollingWindowManager {
    */
   public TopWindow snapshot(long time) {
     TopWindow window = new TopWindow(windowLenMs);
-    if (LOG.isDebugEnabled()) {
-      Set<String> metricNames = metricMap.keySet();
-      LOG.debug("iterating in reported metrics, size={} values={}",
-          metricNames.size(), metricNames);
-    }
+    Set<String> metricNames = metricMap.keySet();
+    LOG.debug("iterating in reported metrics, size={} values={}",
+        metricNames.size(), metricNames);
     for (Map.Entry<String, RollingWindowMap> entry : metricMap.entrySet()) {
       String metricName = entry.getKey();
       RollingWindowMap rollingWindows = entry.getValue();


[20/50] hadoop git commit: HADOOP-12018. smart-apply-patch.sh fails if the patch edits CR+LF files and is created by 'git diff --no-prefix'. Contributed by Kengo Seki.

Posted by zj...@apache.org.
HADOOP-12018. smart-apply-patch.sh fails if the patch edits CR+LF files and is created by 'git diff --no-prefix'. Contributed by Kengo Seki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bb2903ca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bb2903ca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bb2903ca

Branch: refs/heads/YARN-2928
Commit: bb2903ca235015e05eba60c1077f70f54c8de2df
Parents: c439926
Author: Akira Ajisaka <aa...@apache.org>
Authored: Thu Jun 4 11:14:55 2015 +0900
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:43:16 2015 -0700

----------------------------------------------------------------------
 dev-support/smart-apply-patch.sh                | 47 ++++++++------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 2 files changed, 22 insertions(+), 28 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb2903ca/dev-support/smart-apply-patch.sh
----------------------------------------------------------------------
diff --git a/dev-support/smart-apply-patch.sh b/dev-support/smart-apply-patch.sh
index 449fc22..be29c47 100755
--- a/dev-support/smart-apply-patch.sh
+++ b/dev-support/smart-apply-patch.sh
@@ -12,7 +12,7 @@
 #   limitations under the License.
 
 #
-# Determine if the patch file is a git diff file with prefixes.
+# Determine if the git diff patch file has prefixes.
 # These files are generated via "git diff" *without* the --no-prefix option.
 #
 # We can apply these patches more easily because we know that the a/ and b/
@@ -21,28 +21,13 @@
 # And of course, we know that the patch file was generated using git, so we
 # know git apply can handle it properly.
 #
-# Arguments: file name.
-# Return: 0 if it is a git diff; 1 otherwise.
+# Arguments: git diff file name.
+# Return: 0 if it is a git diff with prefix; 1 otherwise.
 #
-is_git_diff_with_prefix() {
-  DIFF_TYPE="unknown"
-  while read -r line; do
-    if [[ "$line" =~ ^diff\  ]]; then
-      if [[ "$line" =~ ^diff\ \-\-git ]]; then
-        DIFF_TYPE="git"
-      else
-        return 1 # All diff lines must be diff --git lines.
-      fi
-    fi
-    if [[ "$line" =~ ^\+\+\+\  ]] ||
-       [[ "$line" =~ ^\-\-\-\  ]]; then
-      if ! [[ "$line" =~ ^....[ab]/ || "$line" =~ ^..../dev/null ]]; then
-        return 1 # All +++ and --- lines must start with a/ or b/ or be /dev/null.
-      fi
-    fi
-  done < $1
-  [ x$DIFF_TYPE == x"git" ] || return 1
-  return 0 # return true (= 0 in bash)
+has_prefix() {
+  awk '/^diff --git / { if ($3 !~ "^a/" || $4 !~ "^b/") { exit 1 } }
+    /^\+{3}|-{3} / { if ($2 !~ "^[ab]/" && $2 !~ "^/dev/null") { exit 1 } }' "$1"
+  return $?
 }
 
 PATCH_FILE=$1
@@ -100,15 +85,21 @@ if [[ ${PATCH_FILE} =~ ^http || ${PATCH_FILE} =~ ${ISSUE_RE} ]]; then
   PATCH_FILE="${PFILE}"
 fi
 
-# Special case for git-diff patches without --no-prefix
-if is_git_diff_with_prefix "$PATCH_FILE"; then
-  GIT_FLAGS="--binary -p1 -v"
+# Case for git-diff patches
+if grep -q "^diff --git" "${PATCH_FILE}"; then
+  GIT_FLAGS="--binary -v"
+  if has_prefix "$PATCH_FILE"; then
+    GIT_FLAGS="$GIT_FLAGS -p1"
+  else
+    GIT_FLAGS="$GIT_FLAGS -p0"
+  fi
   if [[ -z $DRY_RUN ]]; then
-      GIT_FLAGS="$GIT_FLAGS --stat --apply "
-      echo Going to apply git patch with: git apply "${GIT_FLAGS}"
+    GIT_FLAGS="$GIT_FLAGS --stat --apply"
+    echo Going to apply git patch with: git apply "${GIT_FLAGS}"
   else
-      GIT_FLAGS="$GIT_FLAGS --check "
+    GIT_FLAGS="$GIT_FLAGS --check"
   fi
+  # shellcheck disable=SC2086
   git apply ${GIT_FLAGS} "${PATCH_FILE}"
   exit $?
 fi

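For readers following the logic: the new has_prefix helper accepts a patch only when every "diff --git" header carries the a/ and b/ prefixes and every +++/--- line targets a/, b/, or /dev/null; git apply is then run with -p1, and with -p0 otherwise. A rough Java rendering of that check, for illustration only (class and method names here are hypothetical, not part of the patch):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Paths;

    public class PatchPrefixCheck {
      // Illustrative Java restatement of the has_prefix awk program above.
      public static boolean hasPrefix(String patchFile) throws IOException {
        for (String line : Files.readAllLines(Paths.get(patchFile))) {
          if (line.startsWith("diff --git ")) {
            // header must look like: diff --git a/<path> b/<path>
            String[] parts = line.split(" ");
            if (parts.length < 4
                || !parts[2].startsWith("a/") || !parts[3].startsWith("b/")) {
              return false;
            }
          } else if (line.startsWith("+++ ") || line.startsWith("--- ")) {
            String target = line.substring(4);
            if (!target.startsWith("a/") && !target.startsWith("b/")
                && !target.startsWith("/dev/null")) {
              return false;
            }
          }
        }
        return true; // caller would pass -p1 to git apply; otherwise -p0
      }
    }
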
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb2903ca/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 5c1fe41..53bb150 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -814,6 +814,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-11991. test-patch.sh isn't re-executed even if smart-apply-patch.sh
     is modified. (Kengo Seki via aajisaka)
 
+    HADOOP-12018. smart-apply-patch.sh fails if the patch edits CR+LF files
+    and is created by 'git diff --no-prefix'. (Kengo Seki via aajisaka)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES


[24/50] hadoop git commit: YARN-3733. Fix DominantRC#compare() does not work as expected if cluster resource is empty. (Rohith Sharmaks via wangda)

Posted by zj...@apache.org.
YARN-3733. Fix DominantRC#compare() does not work as expected if cluster resource is empty. (Rohith Sharmaks via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/08525ff3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/08525ff3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/08525ff3

Branch: refs/heads/YARN-2928
Commit: 08525ff38ae53ff7c1f48a5fcdf7906d53259c90
Parents: ea1a48a
Author: Wangda Tan <wa...@apache.org>
Authored: Thu Jun 4 10:22:57 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:56:56 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 +
 .../resource/DominantResourceCalculator.java    | 15 +++++
 .../capacity/TestCapacityScheduler.java         | 58 +++++++++++++++++++-
 3 files changed, 75 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/08525ff3/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0c76206..83aa12f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -714,6 +714,9 @@ Release 2.7.1 - UNRELEASED
     YARN-3585. NodeManager cannot exit on SHUTDOWN event triggered and NM
     recovery is enabled (Rohith Sharmaks via jlowe)
 
+    YARN-3733. Fix DominantRC#compare() does not work as expected if 
+    cluster resource is empty. (Rohith Sharmaks via wangda)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/08525ff3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index 6f5b40e..2ee95ce 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -53,6 +53,21 @@ public class DominantResourceCalculator extends ResourceCalculator {
       return 0;
     }
     
+    if (isInvalidDivisor(clusterResource)) {
+      if ((lhs.getMemory() < rhs.getMemory() && lhs.getVirtualCores() > rhs
+          .getVirtualCores())
+          || (lhs.getMemory() > rhs.getMemory() && lhs.getVirtualCores() < rhs
+              .getVirtualCores())) {
+        return 0;
+      } else if (lhs.getMemory() > rhs.getMemory()
+          || lhs.getVirtualCores() > rhs.getVirtualCores()) {
+        return 1;
+      } else if (lhs.getMemory() < rhs.getMemory()
+          || lhs.getVirtualCores() < rhs.getVirtualCores()) {
+        return -1;
+      }
+    }
+
     float l = getResourceAsValue(clusterResource, lhs, true);
     float r = getResourceAsValue(clusterResource, rhs, true);
     

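To make the new fallback concrete, here is a minimal, hypothetical Java sketch (not part of the patch) exercising the patched compare() with an empty cluster resource; it assumes the patched DominantResourceCalculator is on the classpath:

    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;

    public class EmptyClusterCompare {
      public static void main(String[] args) {
        DominantResourceCalculator drc = new DominantResourceCalculator();
        // memory == 0 or vcores == 0 makes the cluster an invalid divisor
        Resource cluster = Resource.newInstance(0, 0);
        Resource lhs = Resource.newInstance(2048, 2);
        Resource rhs = Resource.newInstance(1024, 1);
        // lhs dominates in both dimensions, so the result is positive
        System.out.println(drc.compare(cluster, lhs, rhs));   // expected: 1
        // mixed dominance (more memory, fewer vcores) is treated as a tie
        Resource mixed = Resource.newInstance(4096, 0);
        System.out.println(drc.compare(cluster, mixed, lhs)); // expected: 0
      }
    }

Before the fix, comparisons against an empty cluster resource went through the dominant-share division and did not order resources as expected.
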
http://git-wip-us.apache.org/repos/asf/hadoop/blob/08525ff3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index 0361424..3827f85 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -130,6 +130,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedule
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerQueueInfoList;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.FairOrderingPolicy;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.junit.After;
@@ -1281,9 +1282,15 @@ public class TestCapacityScheduler {
 
   private MockRM setUpMove() {
     CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
+    return setUpMove(conf);
+  }
+
+  private MockRM setUpMove(Configuration config) {
+    CapacitySchedulerConfiguration conf =
+        new CapacitySchedulerConfiguration(config);
     setupQueueConfiguration(conf);
     conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
-      ResourceScheduler.class);
+        ResourceScheduler.class);
     MockRM rm = new MockRM(conf);
     rm.start();
     return rm;
@@ -2952,6 +2959,55 @@ public class TestCapacityScheduler {
     Assert.assertEquals(queueInfoB.getDefaultNodeLabelExpression(), "y");
   }
 
+  @Test(timeout = 30000)
+  public void testAMLimitUsage() throws Exception {
+
+    CapacitySchedulerConfiguration config =
+        new CapacitySchedulerConfiguration();
+
+    config.set(CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS,
+        DefaultResourceCalculator.class.getName());
+    verifyAMLimitForLeafQueue(config);
+
+    config.set(CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS,
+        DominantResourceCalculator.class.getName());
+    verifyAMLimitForLeafQueue(config);
+
+  }
+
+  private void verifyAMLimitForLeafQueue(CapacitySchedulerConfiguration config)
+      throws Exception {
+    MockRM rm = setUpMove(config);
+
+    String queueName = "a1";
+    String userName = "user_0";
+    ResourceScheduler scheduler = rm.getRMContext().getScheduler();
+    LeafQueue queueA =
+        (LeafQueue) ((CapacityScheduler) scheduler).getQueue(queueName);
+    Resource amResourceLimit = queueA.getAMResourceLimit();
+
+    Resource amResource =
+        Resource.newInstance(amResourceLimit.getMemory() + 1,
+            amResourceLimit.getVirtualCores() + 1);
+
+    rm.submitApp(amResource.getMemory(), "app-1", userName, null, queueName);
+
+    rm.submitApp(amResource.getMemory(), "app-1", userName, null, queueName);
+
+    // When the AM limit is exceeded, only 1 application will be activated;
+    // the rest will remain pending.
+    Assert.assertEquals("PendingApplications should be 1", 1,
+        queueA.getNumPendingApplications());
+    Assert.assertEquals("Active applications should be 1", 1,
+        queueA.getNumActiveApplications());
+
+    Assert.assertEquals("User PendingApplications should be 1", 1, queueA
+        .getUser(userName).getPendingApplications());
+    Assert.assertEquals("User Active applications should be 1", 1, queueA
+        .getUser(userName).getActiveApplications());
+    rm.stop();
+  }
+
   private void setMaxAllocMb(Configuration conf, int maxAllocMb) {
     conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
         maxAllocMb);

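The test deliberately runs verifyAMLimitForLeafQueue under both resource calculators because they can rank the same pair of resources differently, which affects AM-limit accounting. A small, hypothetical contrast (illustrative only, using the real compare() signatures):

    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
    import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;

    public class CalculatorContrast {
      public static void main(String[] args) {
        Resource cluster = Resource.newInstance(8192, 8);
        Resource a = Resource.newInstance(1024, 4); // little memory, many cores
        Resource b = Resource.newInstance(2048, 1); // more memory, one core
        // memory-only view: a < b, so the result is negative
        System.out.println(new DefaultResourceCalculator().compare(cluster, a, b));
        // dominant-share view: a's 4/8 cores beat b's 2048/8192 memory, so positive
        System.out.println(new DominantResourceCalculator().compare(cluster, a, b));
      }
    }
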

[11/50] hadoop git commit: Revert "YARN-1462. Made RM write application tags to timeline server and exposed them to users via generic history web UI and REST API. Contributed by Xuan Gong."

Posted by zj...@apache.org.
Revert "YARN-1462. Made RM write application tags to timeline server and exposed them to users via generic history web UI and REST API. Contributed by Xuan Gong."

This reverts commit 4a9ec1a8243e2394ff7221b1c20dfaa80e9f5111.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2bff83ca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2bff83ca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2bff83ca

Branch: refs/heads/YARN-2928
Commit: 2bff83caf92d2faf33522b417dd86a2ebace2d9f
Parents: 89899fe
Author: Zhijie Shen <zj...@apache.org>
Authored: Wed Jun 3 14:15:41 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:43:14 2015 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/mapred/NotRunningJob.java |  2 +-
 .../mapred/TestClientServiceDelegate.java       |  4 +--
 .../apache/hadoop/mapred/TestYARNRunner.java    |  2 +-
 hadoop-yarn-project/CHANGES.txt                 |  3 --
 .../yarn/api/records/ApplicationReport.java     |  4 +--
 .../hadoop/yarn/client/ProtocolHATestBase.java  |  2 +-
 .../yarn/client/api/impl/TestAHSClient.java     |  8 ++---
 .../yarn/client/api/impl/TestYarnClient.java    |  9 +++--
 .../hadoop/yarn/client/cli/TestYarnCLI.java     | 27 ++++++--------
 .../hadoop/yarn/api/TestApplicatonReport.java   |  3 +-
 .../ApplicationHistoryManagerImpl.java          |  2 +-
 ...pplicationHistoryManagerOnTimelineStore.java | 19 ++--------
 ...pplicationHistoryManagerOnTimelineStore.java |  9 -----
 .../metrics/ApplicationMetricsConstants.java    |  1 -
 .../metrics/ApplicationCreatedEvent.java        | 10 +-----
 .../metrics/SystemMetricsPublisher.java         |  4 +--
 .../applicationsmanager/MockAsm.java            |  2 +-
 .../metrics/TestSystemMetricsPublisher.java     | 37 --------------------
 .../resourcemanager/webapp/TestRMWebApp.java    |  2 +-
 .../src/site/markdown/TimelineServer.md         | 30 ++--------------
 20 files changed, 36 insertions(+), 144 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bff83ca/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java
index 1b26cd3..03552e4 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java
@@ -90,7 +90,7 @@ public class NotRunningJob implements MRClientProtocol {
     return ApplicationReport.newInstance(unknownAppId, unknownAttemptId,
       "N/A", "N/A", "N/A", "N/A", 0, null, YarnApplicationState.NEW, "N/A",
       "N/A", 0, 0, FinalApplicationStatus.UNDEFINED, null, "N/A", 0.0f,
-      YarnConfiguration.DEFAULT_APPLICATION_TYPE, null, null);
+      YarnConfiguration.DEFAULT_APPLICATION_TYPE, null);
   }
 
   NotRunningJob(ApplicationReport applicationReport, JobState jobState) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bff83ca/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
index c1c03df..b85f18d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
@@ -516,7 +516,7 @@ public class TestClientServiceDelegate {
     return ApplicationReport.newInstance(appId, attemptId, "user", "queue",
       "appname", "host", 124, null, YarnApplicationState.FINISHED,
       "diagnostics", "url", 0, 0, FinalApplicationStatus.SUCCEEDED, null,
-      "N/A", 0.0f, YarnConfiguration.DEFAULT_APPLICATION_TYPE, null, null);
+      "N/A", 0.0f, YarnConfiguration.DEFAULT_APPLICATION_TYPE, null);
   }
 
   private ApplicationReport getRunningApplicationReport(String host, int port) {
@@ -526,7 +526,7 @@ public class TestClientServiceDelegate {
     return ApplicationReport.newInstance(appId, attemptId, "user", "queue",
       "appname", host, port, null, YarnApplicationState.RUNNING, "diagnostics",
       "url", 0, 0, FinalApplicationStatus.UNDEFINED, null, "N/A", 0.0f,
-      YarnConfiguration.DEFAULT_APPLICATION_TYPE, null, null);
+      YarnConfiguration.DEFAULT_APPLICATION_TYPE, null);
   }
 
   private ResourceMgrDelegate getRMDelegate() throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bff83ca/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java
index 380fd62..0e53ab0 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java
@@ -196,7 +196,7 @@ public class TestYARNRunner extends TestCase {
             ApplicationReport.newInstance(appId, null, "tmp", "tmp", "tmp",
                 "tmp", 0, null, YarnApplicationState.FINISHED, "tmp", "tmp",
                 0l, 0l, FinalApplicationStatus.SUCCEEDED, null, null, 0f,
-                "tmp", null, null));
+                "tmp", null));
     yarnRunner.killJob(jobId);
     verify(clientDelegate).killJob(jobId);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bff83ca/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index ce77941..95a2325 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -592,9 +592,6 @@ Release 2.7.1 - UNRELEASED
     YARN-3723. Need to clearly document primaryFilter and otherInfo value type.
     (Zhijie Shen via xgong)
 
-    YARN-1462. Made RM write application tags to timeline server and exposed them
-    to users via generic history web UI and REST API. (Xuan Gong via zjshen)
-
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bff83ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
index 99e4e2e..e5d7254 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
@@ -59,8 +59,7 @@ public abstract class ApplicationReport {
       YarnApplicationState state, String diagnostics, String url,
       long startTime, long finishTime, FinalApplicationStatus finalStatus,
       ApplicationResourceUsageReport appResources, String origTrackingUrl,
-      float progress, String applicationType, Token amRmToken,
-      Set<String> tags) {
+      float progress, String applicationType, Token amRmToken) {
     ApplicationReport report = Records.newRecord(ApplicationReport.class);
     report.setApplicationId(applicationId);
     report.setCurrentApplicationAttemptId(applicationAttemptId);
@@ -81,7 +80,6 @@ public abstract class ApplicationReport {
     report.setProgress(progress);
     report.setApplicationType(applicationType);
     report.setAMRMToken(amRmToken);
-    report.setApplicationTags(tags);
     return report;
   }
 

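For quick reference, the restored factory shape after this revert, as a hypothetical call site (the trailing Set<String> tags parameter is gone, so the AMRM token is again the last argument):

    import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.api.records.ApplicationReport;
    import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
    import org.apache.hadoop.yarn.api.records.YarnApplicationState;

    public class ReportFactoryShape {
      static ApplicationReport sample() {
        ApplicationId appId = ApplicationId.newInstance(1234, 5);
        ApplicationAttemptId attemptId =
            ApplicationAttemptId.newInstance(appId, 1);
        // 19 arguments, ending with the AMRM token; no tags argument
        return ApplicationReport.newInstance(appId, attemptId, "user", "queue",
            "appname", "host", 124, null, YarnApplicationState.RUNNING,
            "diagnostics", "url", 0, 0, FinalApplicationStatus.UNDEFINED,
            null, "N/A", 0.5f, "YARN", null /* amRmToken */);
      }
    }
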
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bff83ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
index 2260de5..903dd94 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
@@ -647,7 +647,7 @@ public abstract class ProtocolHATestBase extends ClientBaseWithFixes {
               "fakeQueue", "fakeApplicationName", "localhost", 0, null,
               YarnApplicationState.FINISHED, "fake an application report", "",
               1000l, 1200l, FinalApplicationStatus.FAILED, null, "", 50f,
-              "fakeApplicationType", null, null);
+              "fakeApplicationType", null);
       return report;
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bff83ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java
index 12d0602..c3e3c41 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java
@@ -334,7 +334,7 @@ public class TestAHSClient {
             "queue", "appname", "host", 124, null,
             YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
             FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN",
-            null, null);
+            null);
       List<ApplicationReport> applicationReports =
           new ArrayList<ApplicationReport>();
       applicationReports.add(newApplicationReport);
@@ -391,7 +391,7 @@ public class TestAHSClient {
             "queue2", "appname2", "host2", 125, null,
             YarnApplicationState.FINISHED, "diagnostics2", "url2", 2, 2,
             FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.63789f,
-            "NON-YARN", null, null);
+            "NON-YARN", null);
       applicationReports.add(newApplicationReport2);
 
       ApplicationId applicationId3 = ApplicationId.newInstance(1234, 7);
@@ -401,7 +401,7 @@ public class TestAHSClient {
             "queue3", "appname3", "host3", 126, null,
             YarnApplicationState.RUNNING, "diagnostics3", "url3", 3, 3,
             FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.73789f,
-            "MAPREDUCE", null, null);
+            "MAPREDUCE", null);
       applicationReports.add(newApplicationReport3);
 
       ApplicationId applicationId4 = ApplicationId.newInstance(1234, 8);
@@ -411,7 +411,7 @@ public class TestAHSClient {
             "queue4", "appname4", "host4", 127, null,
             YarnApplicationState.FAILED, "diagnostics4", "url4", 4, 4,
             FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.83789f,
-            "NON-MAPREDUCE", null, null);
+            "NON-MAPREDUCE", null);
       applicationReports.add(newApplicationReport4);
       reports = applicationReports;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bff83ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index b7608b2..bc40b9a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -613,8 +613,7 @@ public class TestYarnClient {
           applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
           "user", "queue", "appname", "host", 124, null,
           YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
-          FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null,
-          null);
+          FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
       List<ApplicationReport> applicationReports = new ArrayList<ApplicationReport>();
       applicationReports.add(newApplicationReport);
       List<ApplicationAttemptReport> appAttempts = new ArrayList<ApplicationAttemptReport>();
@@ -690,7 +689,7 @@ public class TestYarnClient {
           "user2", "queue2", "appname2", "host2", 125, null,
           YarnApplicationState.FINISHED, "diagnostics2", "url2", 2, 2,
           FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.63789f, "NON-YARN", 
-          null, null);
+        null);
       applicationReports.add(newApplicationReport2);
 
       ApplicationId applicationId3 = ApplicationId.newInstance(1234, 7);
@@ -699,7 +698,7 @@ public class TestYarnClient {
           "user3", "queue3", "appname3", "host3", 126, null,
           YarnApplicationState.RUNNING, "diagnostics3", "url3", 3, 3,
           FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.73789f, "MAPREDUCE",
-          null, null);
+        null);
       applicationReports.add(newApplicationReport3);
 
       ApplicationId applicationId4 = ApplicationId.newInstance(1234, 8);
@@ -710,7 +709,7 @@ public class TestYarnClient {
               "user4", "queue4", "appname4", "host4", 127, null,
               YarnApplicationState.FAILED, "diagnostics4", "url4", 4, 4,
               FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.83789f,
-              "NON-MAPREDUCE", null, null);
+              "NON-MAPREDUCE", null);
       applicationReports.add(newApplicationReport4);
       return applicationReports;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bff83ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
index 70c7d82..1013958 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
@@ -104,7 +104,7 @@ public class TestYarnCLI {
           "user", "queue", "appname", "host", 124, null,
           YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0,
           FinalApplicationStatus.SUCCEEDED, usageReport, "N/A", 0.53789f, "YARN",
-          null, null);
+          null);
       newApplicationReport.setLogAggregationStatus(LogAggregationStatus.SUCCEEDED);
       when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(
           newApplicationReport);
@@ -350,8 +350,7 @@ public class TestYarnCLI {
         applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
         "user", "queue", "appname", "host", 124, null,
         YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
-        FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null,
-        null);
+        FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
     List<ApplicationReport> applicationReports = new ArrayList<ApplicationReport>();
     applicationReports.add(newApplicationReport);
 
@@ -361,7 +360,7 @@ public class TestYarnCLI {
         "user2", "queue2", "appname2", "host2", 125, null,
         YarnApplicationState.FINISHED, "diagnostics2", "url2", 2, 2,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.63789f, "NON-YARN", 
-        null, null);
+      null);
     applicationReports.add(newApplicationReport2);
 
     ApplicationId applicationId3 = ApplicationId.newInstance(1234, 7);
@@ -370,7 +369,7 @@ public class TestYarnCLI {
         "user3", "queue3", "appname3", "host3", 126, null,
         YarnApplicationState.RUNNING, "diagnostics3", "url3", 3, 3,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.73789f, "MAPREDUCE", 
-        null, null);
+        null);
     applicationReports.add(newApplicationReport3);
 
     ApplicationId applicationId4 = ApplicationId.newInstance(1234, 8);
@@ -379,7 +378,7 @@ public class TestYarnCLI {
         "user4", "queue4", "appname4", "host4", 127, null,
         YarnApplicationState.FAILED, "diagnostics4", "url4", 4, 4,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.83789f, "NON-MAPREDUCE",
-        null, null);
+        null);
     applicationReports.add(newApplicationReport4);
 
     ApplicationId applicationId5 = ApplicationId.newInstance(1234, 9);
@@ -388,7 +387,7 @@ public class TestYarnCLI {
         "user5", "queue5", "appname5", "host5", 128, null,
         YarnApplicationState.ACCEPTED, "diagnostics5", "url5", 5, 5,
         FinalApplicationStatus.KILLED, null, "N/A", 0.93789f, "HIVE",
-        null, null);
+        null);
     applicationReports.add(newApplicationReport5);
 
     ApplicationId applicationId6 = ApplicationId.newInstance(1234, 10);
@@ -397,7 +396,7 @@ public class TestYarnCLI {
         "user6", "queue6", "appname6", "host6", 129, null,
         YarnApplicationState.SUBMITTED, "diagnostics6", "url6", 6, 6,
         FinalApplicationStatus.KILLED, null, "N/A", 0.99789f, "PIG",
-        null, null);
+        null);
     applicationReports.add(newApplicationReport6);
 
     // Test command yarn application -list
@@ -821,8 +820,7 @@ public class TestYarnCLI {
         applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
         "user", "queue", "appname", "host", 124, null,
         YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0,
-        FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null,
-        null);
+        FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
     when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(
         newApplicationReport2);
     int result = cli.run(new String[] { "application","-kill", applicationId.toString() });
@@ -835,8 +833,7 @@ public class TestYarnCLI {
         applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
         "user", "queue", "appname", "host", 124, null,
         YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
-        FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null,
-        null);
+        FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
     when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(
         newApplicationReport);
     result = cli.run(new String[] { "application","-kill", applicationId.toString() });
@@ -871,8 +868,7 @@ public class TestYarnCLI {
         applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
         "user", "queue", "appname", "host", 124, null,
         YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0,
-        FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null,
-        null);
+        FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
     when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(
         newApplicationReport2);
     int result = cli.run(new String[] { "application", "-movetoqueue",
@@ -887,8 +883,7 @@ public class TestYarnCLI {
         applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
         "user", "queue", "appname", "host", 124, null,
         YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
-        FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null,
-        null);
+        FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
     when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(
         newApplicationReport);
     result = cli.run(new String[] { "application", "-movetoqueue",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bff83ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestApplicatonReport.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestApplicatonReport.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestApplicatonReport.java
index a2ea0a7..9302d4b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestApplicatonReport.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestApplicatonReport.java
@@ -58,8 +58,7 @@ public class TestApplicatonReport {
         ApplicationReport.newInstance(appId, appAttemptId, "user", "queue",
           "appname", "host", 124, null, YarnApplicationState.FINISHED,
           "diagnostics", "url", 0, 0, FinalApplicationStatus.SUCCEEDED, null,
-          "N/A", 0.53789f, YarnConfiguration.DEFAULT_APPLICATION_TYPE, null,
-          null);
+          "N/A", 0.53789f, YarnConfiguration.DEFAULT_APPLICATION_TYPE, null);
     return appReport;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bff83ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java
index 766e0cb..479858f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java
@@ -139,7 +139,7 @@ public class ApplicationHistoryManagerImpl extends AbstractService implements
       appHistory.getYarnApplicationState(), appHistory.getDiagnosticsInfo(),
       trackingUrl, appHistory.getStartTime(), appHistory.getFinishTime(),
       appHistory.getFinalApplicationStatus(), null, "", 100,
-      appHistory.getApplicationType(), null, null);
+      appHistory.getApplicationType(), null);
   }
 
   private ApplicationAttemptHistoryData getLastAttempt(ApplicationId appId)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bff83ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
index 0c7fdc0..9bfd2d6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
@@ -19,14 +19,11 @@
 package org.apache.hadoop.yarn.server.applicationhistoryservice;
 
 import java.io.IOException;
-import java.util.Collection;
 import java.util.EnumSet;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -252,7 +249,6 @@ public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
     FinalApplicationStatus finalStatus = FinalApplicationStatus.UNDEFINED;
     YarnApplicationState state = YarnApplicationState.ACCEPTED;
     ApplicationResourceUsageReport appResources = null;
-    Set<String> appTags = null;
     Map<ApplicationAccessType, String> appViewACLs =
         new HashMap<ApplicationAccessType, String>();
     Map<String, Object> entityInfo = entity.getOtherInfo();
@@ -274,7 +270,7 @@ public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
             ConverterUtils.toApplicationId(entity.getEntityId()),
             latestApplicationAttemptId, user, queue, name, null, -1, null, state,
             diagnosticsInfo, null, createdTime, finishedTime, finalStatus, null,
-            null, progress, type, null, appTags), appViewACLs);
+            null, progress, type, null), appViewACLs);
       }
       if (entityInfo.containsKey(ApplicationMetricsConstants.QUEUE_ENTITY_INFO)) {
         queue =
@@ -299,17 +295,6 @@ public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
         appResources=ApplicationResourceUsageReport
             .newInstance(0, 0, null, null, null, memorySeconds, vcoreSeconds);
       }
-      if (entityInfo.containsKey(ApplicationMetricsConstants.APP_TAGS_INFO)) {
-        appTags = new HashSet<String>();
-        Object obj = entityInfo.get(ApplicationMetricsConstants.APP_TAGS_INFO);
-        if (obj != null && obj instanceof Collection<?>) {
-          for(Object o : (Collection<?>)obj) {
-            if (o != null) {
-              appTags.add(o.toString());
-            }
-          }
-        }
-      }
     }
     List<TimelineEvent> events = entity.getEvents();
     if (events != null) {
@@ -362,7 +347,7 @@ public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
         ConverterUtils.toApplicationId(entity.getEntityId()),
         latestApplicationAttemptId, user, queue, name, null, -1, null, state,
         diagnosticsInfo, null, createdTime, finishedTime, finalStatus, appResources,
-        null, progress, type, null, appTags), appViewACLs);
+        null, progress, type, null), appViewACLs);
   }
 
   private static ApplicationAttemptReport convertToApplicationAttemptReport(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bff83ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java
index dbd75ac..8672953 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java
@@ -22,9 +22,7 @@ import java.security.PrivilegedExceptionAction;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Map;
-import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
@@ -187,9 +185,6 @@ public class TestApplicationHistoryManagerOnTimelineStore {
       Assert.assertEquals(Integer.MAX_VALUE + 3L
           + +app.getApplicationId().getId(), app.getFinishTime());
       Assert.assertTrue(Math.abs(app.getProgress() - 1.0F) < 0.0001);
-      Assert.assertEquals(2, app.getApplicationTags().size());
-      Assert.assertTrue(app.getApplicationTags().contains("Test_APP_TAGS_1"));
-      Assert.assertTrue(app.getApplicationTags().contains("Test_APP_TAGS_2"));
       // App 2 doesn't have the ACLs, such that the default ACLs " " will be used.
       // Nobody except admin and owner has access to the details of the app.
       if ((i ==  1 && callerUGI != null &&
@@ -476,10 +471,6 @@ public class TestApplicationHistoryManagerOnTimelineStore {
       entityInfo.put(ApplicationMetricsConstants.APP_VIEW_ACLS_ENTITY_INFO,
           "user2");
     }
-    Set<String> appTags = new HashSet<String>();
-    appTags.add("Test_APP_TAGS_1");
-    appTags.add("Test_APP_TAGS_2");
-    entityInfo.put(ApplicationMetricsConstants.APP_TAGS_INFO, appTags);
     entity.setOtherInfo(entityInfo);
     TimelineEvent tEvent = new TimelineEvent();
     tEvent.setEventType(ApplicationMetricsConstants.CREATED_EVENT_TYPE);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bff83ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
index f452410..df8eecb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
@@ -73,5 +73,4 @@ public class ApplicationMetricsConstants {
   public static final String LATEST_APP_ATTEMPT_EVENT_INFO =
       "YARN_APPLICATION_LATEST_APP_ATTEMPT";
 
-  public static final String APP_TAGS_INFO = "YARN_APPLICATION_TAGS";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bff83ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/ApplicationCreatedEvent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/ApplicationCreatedEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/ApplicationCreatedEvent.java
index 7c43aa4..2373b3b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/ApplicationCreatedEvent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/ApplicationCreatedEvent.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.metrics;
 
-import java.util.Set;
-
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 
 public class ApplicationCreatedEvent extends
@@ -31,7 +29,6 @@ public class ApplicationCreatedEvent extends
   private String user;
   private String queue;
   private long submittedTime;
-  private Set<String> appTags;
 
   public ApplicationCreatedEvent(ApplicationId appId,
       String name,
@@ -39,8 +36,7 @@ public class ApplicationCreatedEvent extends
       String user,
       String queue,
       long submittedTime,
-      long createdTime,
-      Set<String> appTags) {
+      long createdTime) {
     super(SystemMetricsEventType.APP_CREATED, createdTime);
     this.appId = appId;
     this.name = name;
@@ -48,7 +44,6 @@ public class ApplicationCreatedEvent extends
     this.user = user;
     this.queue = queue;
     this.submittedTime = submittedTime;
-    this.appTags = appTags;
   }
 
   @Override
@@ -80,7 +75,4 @@ public class ApplicationCreatedEvent extends
     return submittedTime;
   }
 
-  public Set<String> getAppTags() {
-    return appTags;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bff83ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java
index 63461b5..2828aec 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java
@@ -107,7 +107,7 @@ public class SystemMetricsPublisher extends CompositeService {
               app.getUser(),
               app.getQueue(),
               app.getSubmitTime(),
-              createdTime, app.getApplicationTags()));
+              createdTime));
     }
   }
 
@@ -252,8 +252,6 @@ public class SystemMetricsPublisher extends CompositeService {
         event.getQueue());
     entityInfo.put(ApplicationMetricsConstants.SUBMITTED_TIME_ENTITY_INFO,
         event.getSubmittedTime());
-    entityInfo.put(ApplicationMetricsConstants.APP_TAGS_INFO,
-        event.getAppTags());
     entity.setOtherInfo(entityInfo);
     TimelineEvent tEvent = new TimelineEvent();
     tEvent.setEventType(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bff83ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
index 34247e7..e52b054 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
@@ -313,7 +313,7 @@ public abstract class MockAsm extends MockApps {
             getName(), null, 0, null, null, getDiagnostics().toString(), 
             getTrackingUrl(), getStartTime(), getFinishTime(), 
             getFinalApplicationStatus(), usageReport , null, getProgress(),
-            type, null, null);
+            type, null);
         return report;
       }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bff83ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
index b122bc4..7c4b5e9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
@@ -21,11 +21,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.metrics;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
-import java.util.Collection;
 import java.util.EnumSet;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
 
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -147,8 +143,6 @@ public class TestSystemMetricsPublisher {
       Assert.assertEquals(app.getSubmitTime(),
           entity.getOtherInfo().get(
               ApplicationMetricsConstants.SUBMITTED_TIME_ENTITY_INFO));
-      Assert.assertTrue(verifyAppTags(app.getApplicationTags(),
-          entity.getOtherInfo()));
       if (i == 1) {
         Assert.assertEquals("uers1,user2",
             entity.getOtherInfo().get(
@@ -358,10 +352,6 @@ public class TestSystemMetricsPublisher {
         FinalApplicationStatus.UNDEFINED);
     when(app.getRMAppMetrics()).thenReturn(
         new RMAppMetrics(null, 0, 0, Integer.MAX_VALUE, Long.MAX_VALUE));
-    Set<String> appTags = new HashSet<String>();
-    appTags.add("test");
-    appTags.add("tags");
-    when(app.getApplicationTags()).thenReturn(appTags);
     return app;
   }
 
@@ -402,31 +392,4 @@ public class TestSystemMetricsPublisher {
     return container;
   }
 
-  private static boolean verifyAppTags(Set<String> appTags,
-      Map<String, Object> entityInfo) {
-    if (!entityInfo.containsKey(ApplicationMetricsConstants.APP_TAGS_INFO)) {
-      return false;
-    }
-    Object obj = entityInfo.get(ApplicationMetricsConstants.APP_TAGS_INFO);
-    if (obj instanceof Collection<?>) {
-      Collection<?> collection = (Collection<?>) obj;
-      if (collection.size() != appTags.size()) {
-        return false;
-      }
-      for (String appTag : appTags) {
-        boolean match = false;
-        for (Object o : collection) {
-          if (o.toString().equals(appTag)) {
-            match = true;
-            break;
-          }
-        }
-        if (!match) {
-          return false;
-        }
-      }
-      return true;
-    }
-    return false;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bff83ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
index 38eebfe..0290421 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
@@ -253,7 +253,7 @@ public class TestRMWebApp {
               app.getStartTime(), app.getFinishTime(),
               app.getFinalApplicationStatus(),
               (ApplicationResourceUsageReport) null, app.getTrackingUrl(),
-              app.getProgress(), app.getApplicationType(), (Token) null, null);
+              app.getProgress(), app.getApplicationType(), (Token) null);
       appReports.add(appReport);
     }
     GetApplicationsResponse response = mock(GetApplicationsResponse.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bff83ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
index 1b66606..90f9512 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
@@ -1125,27 +1125,7 @@ Response Body:
           "submittedTime":1430424769395,
           "startedTime":1430424769395,
           "finishedTime":1430424776594,
-          "elapsedTime":7199},
-          {
-          "appId":"application_1430424020775_0001",
-          "currentAppAttemptId":"appattempt_1430424020775_0001_000001",
-          "user":"zshen",
-          "name":"QuasiMonteCarlo",
-          "queue":"default",
-          "type":"MAPREDUCE",
-          "host":"localhost",
-          "rpcPort":56264,
-          "appState":"FINISHED",
-          "progress":100.0,
-          "diagnosticsInfo":"",
-          "originalTrackingUrl":"http://d-69-91-129-173.dhcp4.washington.edu:19888/jobhistory/job/job_1430424020775_0001",
-          "trackingUrl":"http://d-69-91-129-173.dhcp4.washington.edu:8088/proxy/application_1430424020775_0001/",
-          "finalAppStatus":"SUCCEEDED",
-          "submittedTime":1430424053809,
-          "startedTime":1430424072153,
-          "finishedTime":1430424776594,
-          "elapsedTime":18344,
-          "applicationTags":"mrapplication,ta-example"
+          "elapsedTime":7199
           }
       ]
     }
@@ -1247,7 +1227,6 @@ Response Body:
         <startedTime>1430424053809</startedTime>
         <finishedTime>1430424072153</finishedTime>
         <elapsedTime>18344</elapsedTime>
-        <applicationTags>mrapplication,ta-example</applicationTags>
       </app>
     </apps>
 
@@ -1296,8 +1275,7 @@ None
 | `allocatedVCores` | int | The sum of virtual cores allocated to the application's running containers |
 | `currentAppAttemptId` | string | The latest application attempt ID |
 | `host` | string | The host of the ApplicationMaster |
-| `rpcPort` | int | The RPC port of the ApplicationMaster; zero if no IPC service declared |
-| `applicationTags` | string | The application tags. |
+| `rpcPort` | int | The RPC port of the ApplicationMaster; zero if no IPC service declared. |
 
 ### Response Examples:
 
@@ -1333,8 +1311,7 @@ Response Body:
       "submittedTime": 1430424053809,
       "startedTime": 1430424053809,
       "finishedTime": 1430424072153,
-      "elapsedTime": 18344,
-      "applicationTags": mrapplication,tag-example
+      "elapsedTime": 18344
     }
 
 #### XML response
@@ -1372,7 +1349,6 @@ Response Body:
        <startedTime>1430424053809</startedTime>
        <finishedTime>1430424072153</finishedTime>
        <elapsedTime>18344</elapsedTime>
-       <applicationTags>mrapplication,ta-example</applicationTags>
      </app>
 
 ## <a name="REST_API_APPLICATION_ATTEMPT_LIST"></a>Application Attempt List


[23/50] hadoop git commit: HDFS-8463. Calling DFSInputStream.seekToNewSource just after stream creation causes NullPointerException. Contributed by Masatake Iwasaki.

Posted by zj...@apache.org.
HDFS-8463. Calling DFSInputStream.seekToNewSource just after stream creation causes NullPointerException. Contributed by Masatake Iwasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/993bf8b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/993bf8b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/993bf8b3

Branch: refs/heads/YARN-2928
Commit: 993bf8b3e638bffcaebe445de36e2adbfd97561a
Parents: 08525ff
Author: Kihwal Lee <ki...@apache.org>
Authored: Thu Jun 4 12:51:00 2015 -0500
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:56:56 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +++
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  3 +++
 .../apache/hadoop/hdfs/TestDFSInputStream.java  | 25 ++++++++++++++++++++
 3 files changed, 31 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/993bf8b3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d65e513..bb65105 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -846,6 +846,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-3716. Purger should remove stale fsimage ckpt files
     (J.Andreina via vinayakumarb)
 
+    HDFS-8463. Calling DFSInputStream.seekToNewSource just after stream creation
+    causes NullPointerException (Masatake Iwasaki via kihwal)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/993bf8b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 8a3f730..6563d7b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1533,6 +1533,9 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
    */
   @Override
   public synchronized boolean seekToNewSource(long targetPos) throws IOException {
+    if (currentNode == null) {
+      return seekToBlockSource(targetPos);
+    }
     boolean markedDead = deadNodes.containsKey(currentNode);
     addToDeadNodes(currentNode);
     DatanodeInfo oldNode = currentNode;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/993bf8b3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
index b9ec2ce..26412c8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
 import static org.hamcrest.CoreMatchers.equalTo;
 
 import java.io.File;
@@ -28,6 +30,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.junit.Assume;
@@ -111,4 +114,26 @@ public class TestDFSInputStream {
     }
   }
 
+  @Test(timeout=60000)
+  public void testSeekToNewSource() throws IOException {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+    DistributedFileSystem fs = cluster.getFileSystem();
+    Path path = new Path("/testfile");
+    DFSTestUtil.createFile(fs, path, 1024, (short) 3, 0);
+    DFSInputStream fin = fs.dfs.open("/testfile");
+    try {
+      fin.seekToNewSource(100);
+      assertEquals(100, fin.getPos());
+      DatanodeInfo firstNode = fin.getCurrentDatanode();
+      assertNotNull(firstNode);
+      fin.seekToNewSource(100);
+      assertEquals(100, fin.getPos());
+      assertFalse(firstNode.equals(fin.getCurrentDatanode()));
+    } finally {
+      fin.close();
+      cluster.shutdown();
+    }
+  }
 }
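
The new regression test above exercises the fixed path inside a
MiniDFSCluster. As a rough client-side sketch of the same call pattern,
assuming a running cluster reachable through the default Configuration and an
existing /testfile (both illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class SeekToNewSourceExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(conf);
             FSDataInputStream in = fs.open(new Path("/testfile"))) {
          // seekToNewSource() before any read leaves currentNode null; prior
          // to HDFS-8463 this threw NullPointerException, now it falls
          // through to seekToBlockSource() and picks a datanode for the block.
          in.seekToNewSource(100);
          System.out.println("pos = " + in.getPos());
        }
      }
    }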


[42/50] hadoop git commit: MAPREDUCE-6354. ShuffleHandler should be able to log shuffle connections. Contributed by Chang Li

Posted by zj...@apache.org.
MAPREDUCE-6354. ShuffleHandler should be able to log shuffle connections. Contributed by Chang Li


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8153dd6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8153dd6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8153dd6

Branch: refs/heads/YARN-2928
Commit: f8153dd6bf8e72b1c0611a650cc5a55e1c66971b
Parents: 33c0302
Author: Jason Lowe <jl...@apache.org>
Authored: Fri Jun 5 22:38:31 2015 +0000
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:57:00 2015 -0700

----------------------------------------------------------------------
 .../src/main/conf/log4j.properties              | 21 ++++++++++++--------
 hadoop-mapreduce-project/CHANGES.txt            |  3 +++
 .../apache/hadoop/mapred/ShuffleHandler.java    | 15 ++++++++++++--
 3 files changed, 29 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8153dd6/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index 3a0a3ad..dcffead 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -67,7 +67,7 @@ log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
 
 #
 # console
-# Add "console" to rootlogger above if you want to use this 
+# Add "console" to rootlogger above if you want to use this
 #
 
 log4j.appender.console=org.apache.log4j.ConsoleAppender
@@ -110,7 +110,7 @@ hadoop.security.log.maxfilesize=256MB
 hadoop.security.log.maxbackupindex=20
 log4j.category.SecurityLogger=${hadoop.security.logger}
 hadoop.security.log.file=SecurityAuth-${user.name}.audit
-log4j.appender.RFAS=org.apache.log4j.RollingFileAppender 
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
 log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
 log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
 log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
@@ -120,7 +120,7 @@ log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
 #
 # Daily Rolling Security appender
 #
-log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender 
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
 log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
 log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
 log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
@@ -184,9 +184,9 @@ log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN
 log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
 
 #
-# Job Summary Appender 
+# Job Summary Appender
 #
-# Use following logger to send summary to separate file defined by 
+# Use following logger to send summary to separate file defined by
 # hadoop.mapreduce.jobsummary.log.file :
 # hadoop.mapreduce.jobsummary.logger=INFO,JSA
 # 
@@ -204,7 +204,12 @@ log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduc
 log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
 
 #
-# Yarn ResourceManager Application Summary Log 
+# shuffle connection log from shuffleHandler
+# Uncomment the following line to enable logging of shuffle connections
+# log4j.logger.org.apache.hadoop.mapred.ShuffleHandler.audit=DEBUG
+
+#
+# Yarn ResourceManager Application Summary Log
 #
 # Set the ResourceManager summary log filename
 yarn.server.resourcemanager.appsummary.log.file=rm-appsummary.log
@@ -212,8 +217,8 @@ yarn.server.resourcemanager.appsummary.log.file=rm-appsummary.log
 yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
 #yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
 
-# To enable AppSummaryLogging for the RM, 
-# set yarn.server.resourcemanager.appsummary.logger to 
+# To enable AppSummaryLogging for the RM,
+# set yarn.server.resourcemanager.appsummary.logger to
 # <LEVEL>,RMSUMMARY in hadoop-env.sh
 
 # Appender for ResourceManager Application Summary Log

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8153dd6/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index e7c02c0..4202ae4 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -364,6 +364,9 @@ Release 2.8.0 - UNRELEASED
     MAPREDUCE-6383. Pi job (QuasiMonteCarlo) should not try to read the 
     results file if its job fails. (Harsh J via devaraj)
 
+    MAPREDUCE-6354. ShuffleHandler should be able to log shuffle connections
+    (Chang Li via jlowe)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8153dd6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index 6e069f1..eedf42b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -136,7 +136,8 @@ import com.google.protobuf.ByteString;
 public class ShuffleHandler extends AuxiliaryService {
 
   private static final Log LOG = LogFactory.getLog(ShuffleHandler.class);
-  
+  private static final Log AUDITLOG =
+      LogFactory.getLog(ShuffleHandler.class.getName()+".audit");
   public static final String SHUFFLE_MANAGE_OS_CACHE = "mapreduce.shuffle.manage.os.cache";
   public static final boolean DEFAULT_SHUFFLE_MANAGE_OS_CACHE = true;
 
@@ -751,6 +752,14 @@ public class ShuffleHandler extends AuxiliaryService {
         sendError(ctx, "Too many job/reduce parameters", BAD_REQUEST);
         return;
       }
+
+      // this audit log is disabled by default;
+      // to turn it on, enable it in log4j.properties
+      // by uncommenting the corresponding setting
+      if (AUDITLOG.isDebugEnabled()) {
+        AUDITLOG.debug("shuffle for " + jobQ.get(0) +
+                         " reducer " + reduceQ.get(0));
+      }
       int reduceId;
       String jobId;
       try {
@@ -897,7 +906,9 @@ public class ShuffleHandler extends AuxiliaryService {
     protected void setResponseHeaders(HttpResponse response,
         boolean keepAliveParam, long contentLength) {
       if (!connectionKeepAliveEnabled && !keepAliveParam) {
-        LOG.info("Setting connection close header...");
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Setting connection close header...");
+        }
         response.setHeader(HttpHeaders.CONNECTION, CONNECTION_CLOSE);
       } else {
         response.setHeader(HttpHeaders.CONTENT_LENGTH,
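
The audit logger added above is off by default. Besides uncommenting the
log4j.properties line shown earlier in this commit, it can be switched on
programmatically through the log4j 1.x API, e.g. from a test. A minimal
sketch (the logger name is taken from the ShuffleHandler change above):

    import org.apache.log4j.Level;
    import org.apache.log4j.Logger;

    public class EnableShuffleAudit {
      public static void main(String[] args) {
        // Same effect as uncommenting
        // log4j.logger.org.apache.hadoop.mapred.ShuffleHandler.audit=DEBUG
        Logger.getLogger("org.apache.hadoop.mapred.ShuffleHandler.audit")
            .setLevel(Level.DEBUG);
        // Each shuffle fetch is then logged by AUDITLOG as
        // "shuffle for <jobId> reducer <reduceId>".
      }
    }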


[29/50] hadoop git commit: YARN-2392. Add more diags about app retry limits on AM failures. Contributed by Steve Loughran

Posted by zj...@apache.org.
YARN-2392. Add more diags about app retry limits on AM failures. Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b200b880
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b200b880
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b200b880

Branch: refs/heads/YARN-2928
Commit: b200b88082b28fd375d440e4e9093143a35639c6
Parents: d9ee232
Author: Jian He <ji...@apache.org>
Authored: Thu Jun 4 11:14:09 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:56:57 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                     |  3 +++
 .../server/resourcemanager/rmapp/RMAppImpl.java     | 16 +++++++++++++---
 .../rmapp/attempt/RMAppAttemptImpl.java             |  4 ++--
 3 files changed, 18 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b200b880/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 972066d..1c36c9b 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -380,6 +380,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3467. Expose allocatedMB, allocatedVCores, and runningContainers metrics on 
     running Applications in RM Web UI. (Anubhav Dhoot via kasha)
 
+    YARN-2392. Add more diags about app retry limits on AM failures. (Steve
+    Loughran via jianhe)
+
   OPTIMIZATIONS
 
     YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b200b880/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index 040ee49..a68fc77 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -1076,9 +1076,19 @@ public class RMAppImpl implements RMApp, Recoverable {
               + " failed due to " + failedEvent.getDiagnostics()
               + ". Failing the application.";
     } else if (this.isNumAttemptsBeyondThreshold) {
-      msg = "Application " + this.getApplicationId() + " failed "
-              + this.maxAppAttempts + " times due to "
-              + failedEvent.getDiagnostics() + ". Failing the application.";
+      int globalLimit = conf.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
+          YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
+      msg = String.format(
+        "Application %s failed %d times%s%s due to %s. Failing the application.",
+          getApplicationId(),
+          maxAppAttempts,
+          (attemptFailuresValidityInterval <= 0 ? ""
+               : (" in previous " + attemptFailuresValidityInterval
+                  + " milliseconds")),
+          (globalLimit == maxAppAttempts) ? ""
+              : (" (global limit =" + globalLimit
+                 + "; local limit is =" + maxAppAttempts + ")"),
+          failedEvent.getDiagnostics());
     }
     return msg;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b200b880/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index 684dde8..5171bba 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -1459,9 +1459,9 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
         .append(status.getDiagnostics());
     if (this.getTrackingUrl() != null) {
       diagnosticsBuilder.append("For more detailed output,").append(
-        " check application tracking page: ").append(
+        " check the application tracking page: ").append(
         this.getTrackingUrl()).append(
-        " Then, click on links to logs of each attempt.\n");
+        " Then click on links to logs of each attempt.\n");
     }
     return diagnosticsBuilder.toString();
   }
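
To illustrate the diagnostics produced by the reworked format string in
RMAppImpl above, here is a standalone sketch; the app ID, limits, validity
interval, and diagnostics text are made-up values for the example:

    public class AmFailureMessageDemo {
      public static void main(String[] args) {
        String applicationId = "application_1430424020775_0001";
        int maxAppAttempts = 2;   // app-local limit
        int globalLimit = 4;      // illustrative RM-wide max attempts
        long attemptFailuresValidityInterval = 60000L;
        String diagnostics = "AM Container exited with exitCode: 1";
        String msg = String.format(
          "Application %s failed %d times%s%s due to %s. Failing the application.",
            applicationId,
            maxAppAttempts,
            (attemptFailuresValidityInterval <= 0 ? ""
                 : (" in previous " + attemptFailuresValidityInterval
                    + " milliseconds")),
            (globalLimit == maxAppAttempts) ? ""
                : (" (global limit =" + globalLimit
                   + "; local limit is =" + maxAppAttempts + ")"),
            diagnostics);
        System.out.println(msg);
      }
    }

With these values it prints: Application application_1430424020775_0001
failed 2 times in previous 60000 milliseconds (global limit =4; local limit
is =2) due to AM Container exited with exitCode: 1. Failing the application.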


[45/50] hadoop git commit: HDFS-8432. Introduce a minimum compatible layout version to allow downgrade in more rolling upgrade use cases. Contributed by Chris Nauroth.

Posted by zj...@apache.org.
HDFS-8432. Introduce a minimum compatible layout version to allow downgrade in more rolling upgrade use cases. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bcf4319c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bcf4319c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bcf4319c

Branch: refs/heads/YARN-2928
Commit: bcf4319c4128b3beb554eed9c0950e6f2c70be29
Parents: cd8bd6b
Author: cnauroth <cn...@apache.org>
Authored: Sat Jun 6 09:43:47 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:57:01 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../hadoop/hdfs/protocol/LayoutVersion.java     |  51 +++++++++-
 .../hdfs/server/namenode/BackupImage.java       |   2 +-
 .../hdfs/server/namenode/Checkpointer.java      |   4 +-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |  24 ++---
 .../hadoop/hdfs/server/namenode/FSImage.java    |  20 ++--
 .../server/namenode/FSImageFormatProtobuf.java  |   3 +-
 .../hdfs/server/namenode/FSNamesystem.java      | 100 +++++++++++++-----
 .../hadoop/hdfs/server/namenode/NameNode.java   |   3 +-
 .../server/namenode/NameNodeLayoutVersion.java  |  46 ++++++---
 .../hdfs/server/namenode/SecondaryNameNode.java |   4 +-
 .../hadoop/hdfs/protocol/TestLayoutVersion.java | 101 ++++++++++++++++++-
 .../hdfs/server/namenode/CreateEditsLog.java    |   2 +-
 .../hdfs/server/namenode/FSImageTestUtil.java   |   2 +-
 .../hdfs/server/namenode/TestEditLog.java       |  16 +--
 .../server/namenode/TestFSEditLogLoader.java    |   2 +-
 16 files changed, 306 insertions(+), 77 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcf4319c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 72ab17b..f7f7f98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -603,6 +603,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8535. Clarify that dfs usage in dfsadmin -report output includes all
     block replicas. (Eddy Xu via wang)
 
+    HDFS-8432. Introduce a minimum compatible layout version to allow downgrade
+    in more rolling upgrade use cases. (cnauroth)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcf4319c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
index 349f72c..c893744 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
@@ -162,14 +162,22 @@ public class LayoutVersion {
   public static class FeatureInfo {
     private final int lv;
     private final int ancestorLV;
+    private final Integer minCompatLV;
     private final String description;
     private final boolean reserved;
     private final LayoutFeature[] specialFeatures;
 
     public FeatureInfo(final int lv, final int ancestorLV, final String description,
         boolean reserved, LayoutFeature... specialFeatures) {
+      this(lv, ancestorLV, null, description, reserved, specialFeatures);
+    }
+
+    public FeatureInfo(final int lv, final int ancestorLV, Integer minCompatLV,
+        final String description, boolean reserved,
+        LayoutFeature... specialFeatures) {
       this.lv = lv;
       this.ancestorLV = ancestorLV;
+      this.minCompatLV = minCompatLV;
       this.description = description;
       this.reserved = reserved;
       this.specialFeatures = specialFeatures;
@@ -191,7 +199,20 @@ public class LayoutVersion {
       return ancestorLV;
     }
 
-    /** 
+    /**
+     * Accessor method for feature minimum compatible layout version.  If the
+     * feature does not define a minimum compatible layout version, then this
+     * method returns the feature's own layout version.  This would indicate
+     * that the feature cannot provide compatibility with any prior layout
+     * version.
+     *
+     * @return int minimum compatible LV value
+     */
+    public int getMinimumCompatibleLayoutVersion() {
+      return minCompatLV != null ? minCompatLV : lv;
+    }
+
+    /**
      * Accessor method for feature description 
      * @return String feature description 
      */
@@ -220,8 +241,23 @@ public class LayoutVersion {
       LayoutFeature[] features) {
     // Go through all the enum constants and build a map of
     // LayoutVersion <-> Set of all supported features in that LayoutVersion
+    SortedSet<LayoutFeature> existingFeatures = new TreeSet<LayoutFeature>(
+        new LayoutFeatureComparator());
+    for (SortedSet<LayoutFeature> s : map.values()) {
+      existingFeatures.addAll(s);
+    }
+    LayoutFeature prevF = existingFeatures.isEmpty() ? null :
+        existingFeatures.first();
     for (LayoutFeature f : features) {
       final FeatureInfo info = f.getInfo();
+      int minCompatLV = info.getMinimumCompatibleLayoutVersion();
+      if (prevF != null &&
+          minCompatLV > prevF.getInfo().getMinimumCompatibleLayoutVersion()) {
+        throw new AssertionError(String.format(
+            "Features must be listed in order of minimum compatible layout " +
+            "version.  Check features %s and %s.", prevF, f));
+      }
+      prevF = f;
       SortedSet<LayoutFeature> ancestorSet = map.get(info.getAncestorLayoutVersion());
       if (ancestorSet == null) {
         // Empty set
@@ -282,6 +318,18 @@ public class LayoutVersion {
     return getLastNonReservedFeature(features).getInfo().getLayoutVersion();
   }
 
+  /**
+   * Gets the minimum compatible layout version.
+   *
+   * @param features all features to check
+   * @return minimum compatible layout version
+   */
+  public static int getMinimumCompatibleLayoutVersion(
+      LayoutFeature[] features) {
+    return getLastNonReservedFeature(features).getInfo()
+        .getMinimumCompatibleLayoutVersion();
+  }
+
   static LayoutFeature getLastNonReservedFeature(LayoutFeature[] features) {
     for (int i = features.length -1; i >= 0; i--) {
       final FeatureInfo info = features[i].getInfo();
@@ -292,4 +340,3 @@ public class LayoutVersion {
     throw new AssertionError("All layout versions are reserved.");
   }
 }
-

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcf4319c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
index f0879ee..ae4e874 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
@@ -333,7 +333,7 @@ public class BackupImage extends FSImage {
    * directories.
    */
   synchronized void namenodeStartedLogSegment(long txid) throws IOException {
-    editLog.startLogSegment(txid, true);
+    editLog.startLogSegment(txid, true, namesystem.getEffectiveLayoutVersion());
 
     if (bnState == BNState.DROP_UNTIL_NEXT_ROLL) {
       setState(BNState.JOURNAL_ONLY);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcf4319c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
index 011a459..25b87f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
@@ -257,7 +257,9 @@ class Checkpointer extends Daemon {
         backupNode.namesystem.setBlockTotal();
       }
       bnImage.saveFSImageInAllDirs(backupNode.getNamesystem(), txid);
-      bnStorage.writeAll();
+      if (!backupNode.namesystem.isRollingUpgrade()) {
+        bnStorage.writeAll();
+      }
     } finally {
       backupNode.namesystem.writeUnlock();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcf4319c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index dec9709..1b0b572 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -301,7 +301,7 @@ public class FSEditLog implements LogsPurgeable {
    * Initialize the output stream for logging, opening the first
    * log segment.
    */
-  synchronized void openForWrite() throws IOException {
+  synchronized void openForWrite(int layoutVersion) throws IOException {
     Preconditions.checkState(state == State.BETWEEN_LOG_SEGMENTS,
         "Bad state: %s", state);
 
@@ -318,7 +318,7 @@ public class FSEditLog implements LogsPurgeable {
       throw new IllegalStateException(error);
     }
     
-    startLogSegmentAndWriteHeaderTxn(segmentTxId);
+    startLogSegmentAndWriteHeaderTxn(segmentTxId, layoutVersion);
     assert state == State.IN_SEGMENT : "Bad state: " + state;
   }
   
@@ -1197,12 +1197,12 @@ public class FSEditLog implements LogsPurgeable {
    * @return the transaction id of the BEGIN_LOG_SEGMENT transaction
    * in the new log.
    */
-  synchronized long rollEditLog() throws IOException {
+  synchronized long rollEditLog(int layoutVersion) throws IOException {
     LOG.info("Rolling edit logs");
     endCurrentLogSegment(true);
     
     long nextTxId = getLastWrittenTxId() + 1;
-    startLogSegmentAndWriteHeaderTxn(nextTxId);
+    startLogSegmentAndWriteHeaderTxn(nextTxId, layoutVersion);
     
     assert curSegmentTxId == nextTxId;
     return nextTxId;
@@ -1212,7 +1212,7 @@ public class FSEditLog implements LogsPurgeable {
    * Remote namenode just has started a log segment, start log segment locally.
    */
   public synchronized void startLogSegment(long txid, 
-      boolean abortCurrentLogSegment) throws IOException {
+      boolean abortCurrentLogSegment, int layoutVersion) throws IOException {
     LOG.info("Started a new log segment at txid " + txid);
     if (isSegmentOpen()) {
       if (getLastWrittenTxId() == txid - 1) {
@@ -1234,14 +1234,15 @@ public class FSEditLog implements LogsPurgeable {
       }
     }
     setNextTxId(txid);
-    startLogSegment(txid);
+    startLogSegment(txid, layoutVersion);
   }
   
   /**
    * Start writing to the log segment with the given txid.
    * Transitions from BETWEEN_LOG_SEGMENTS state to IN_LOG_SEGMENT state. 
    */
-  private void startLogSegment(final long segmentTxId) throws IOException {
+  private void startLogSegment(final long segmentTxId, int layoutVersion)
+      throws IOException {
     assert Thread.holdsLock(this);
 
     LOG.info("Starting log segment at " + segmentTxId);
@@ -1263,8 +1264,7 @@ public class FSEditLog implements LogsPurgeable {
     storage.attemptRestoreRemovedStorage();
     
     try {
-      editLogStream = journalSet.startLogSegment(segmentTxId,
-          NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
+      editLogStream = journalSet.startLogSegment(segmentTxId, layoutVersion);
     } catch (IOException ex) {
       throw new IOException("Unable to start log segment " +
           segmentTxId + ": too few journals successfully started.", ex);
@@ -1274,9 +1274,9 @@ public class FSEditLog implements LogsPurgeable {
     state = State.IN_SEGMENT;
   }
 
-  synchronized void startLogSegmentAndWriteHeaderTxn(final long segmentTxId
-      ) throws IOException {
-    startLogSegment(segmentTxId);
+  synchronized void startLogSegmentAndWriteHeaderTxn(final long segmentTxId,
+      int layoutVersion) throws IOException {
+    startLogSegment(segmentTxId, layoutVersion);
 
     logEdit(LogSegmentOp.getInstance(cache.get(),
         FSEditLogOpCodes.OP_START_LOG_SEGMENT));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcf4319c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index cd7cf18..0dd7855 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -572,9 +572,9 @@ public class FSImage implements Closeable {
     return editLog;
   }
 
-  void openEditLogForWrite() throws IOException {
+  void openEditLogForWrite(int layoutVersion) throws IOException {
     assert editLog != null : "editLog must be initialized";
-    editLog.openForWrite();
+    editLog.openForWrite(layoutVersion);
     storage.writeTransactionIdFileToStorage(editLog.getCurSegmentTxId());
   }
   
@@ -1127,10 +1127,13 @@ public class FSImage implements Closeable {
     try {
       try {
         saveFSImageInAllDirs(source, nnf, imageTxId, canceler);
-        storage.writeAll();
+        if (!source.isRollingUpgrade()) {
+          storage.writeAll();
+        }
       } finally {
         if (editLogWasOpen) {
-          editLog.startLogSegmentAndWriteHeaderTxn(imageTxId + 1);
+          editLog.startLogSegmentAndWriteHeaderTxn(imageTxId + 1,
+              source.getEffectiveLayoutVersion());
           // Take this opportunity to note the current transaction.
           // Even if the namespace save was cancelled, this marker
           // is only used to determine what transaction ID is required
@@ -1315,8 +1318,8 @@ public class FSImage implements Closeable {
     }
   }
 
-  CheckpointSignature rollEditLog() throws IOException {
-    getEditLog().rollEditLog();
+  CheckpointSignature rollEditLog(int layoutVersion) throws IOException {
+    getEditLog().rollEditLog(layoutVersion);
     // Record this log segment ID in all of the storage directories, so
     // we won't miss this log segment on a restart if the edits directories
     // go missing.
@@ -1341,7 +1344,8 @@ public class FSImage implements Closeable {
    * @throws IOException
    */
   NamenodeCommand startCheckpoint(NamenodeRegistration bnReg, // backup node
-                                  NamenodeRegistration nnReg) // active name-node
+                                  NamenodeRegistration nnReg,
+                                  int layoutVersion) // active name-node
   throws IOException {
     LOG.info("Start checkpoint at txid " + getEditLog().getLastWrittenTxId());
     String msg = null;
@@ -1370,7 +1374,7 @@ public class FSImage implements Closeable {
     if(storage.getNumStorageDirs(NameNodeDirType.IMAGE) == 0)
       // do not return image if there are no image directories
       needToReturnImg = false;
-    CheckpointSignature sig = rollEditLog();
+    CheckpointSignature sig = rollEditLog(layoutVersion);
     return new CheckpointCommand(sig, needToReturnImg);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcf4319c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
index 69e9bb5..7c8a857 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
@@ -465,7 +465,8 @@ public final class FSImageFormatProtobuf {
 
       FileSummary.Builder b = FileSummary.newBuilder()
           .setOndiskVersion(FSImageUtil.FILE_VERSION)
-          .setLayoutVersion(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
+          .setLayoutVersion(
+              context.getSourceNamesystem().getEffectiveLayoutVersion());
 
       codec = compression.getImageCodec();
       if (codec != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcf4319c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index dfbf04e..d3d98fd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -210,7 +210,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
@@ -225,6 +224,7 @@ import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.namenode.ha.EditLogTailer;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.ha.StandbyCheckpointer;
@@ -992,8 +992,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       if (needToSave) {
         fsImage.saveNamespace(this);
       } else {
-        updateStorageVersionForRollingUpgrade(fsImage.getLayoutVersion(),
-            startOpt);
         // No need to save, so mark the phase done.
         StartupProgress prog = NameNode.getStartupProgress();
         prog.beginPhase(Phase.SAVING_CHECKPOINT);
@@ -1003,7 +1001,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       // we shouldn't do it when coming up in standby state
       if (!haEnabled || (haEnabled && startOpt == StartupOption.UPGRADE)
           || (haEnabled && startOpt == StartupOption.UPGRADEONLY)) {
-        fsImage.openEditLogForWrite();
+        fsImage.openEditLogForWrite(getEffectiveLayoutVersion());
       }
       success = true;
     } finally {
@@ -1015,18 +1013,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     imageLoadComplete();
   }
 
-  private void updateStorageVersionForRollingUpgrade(final long layoutVersion,
-      StartupOption startOpt) throws IOException {
-    boolean rollingStarted = RollingUpgradeStartupOption.STARTED
-        .matches(startOpt) && layoutVersion > HdfsServerConstants
-        .NAMENODE_LAYOUT_VERSION;
-    boolean rollingRollback = RollingUpgradeStartupOption.ROLLBACK
-        .matches(startOpt);
-    if (rollingRollback || rollingStarted) {
-      fsImage.updateStorageVersion();
-    }
-  }
-
   private void startSecretManager() {
     if (dtSecretManager != null) {
       try {
@@ -1144,7 +1130,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
             nextTxId);
         editLog.setNextTxId(nextTxId);
 
-        getFSImage().editLog.openForWrite();
+        getFSImage().editLog.openForWrite(getEffectiveLayoutVersion());
       }
 
       // Enable quota checks.
@@ -1941,6 +1927,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
                    String clientName, String clientMachine,
                    long mtime)
       throws IOException, UnresolvedLinkException {
+    requireEffectiveLayoutVersionForFeature(Feature.TRUNCATE);
     boolean ret;
     try {
       ret = truncateInt(src, newLength, clientName, clientMachine, mtime);
@@ -2564,7 +2551,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     }
 
     if (writeToEditLog) {
-      getEditLog().logAppendFile(src, file, newBlock, logRetryCache);
+      if (NameNodeLayoutVersion.supports(Feature.APPEND_NEW_BLOCK,
+          getEffectiveLayoutVersion())) {
+        getEditLog().logAppendFile(src, file, newBlock, logRetryCache);
+      } else {
+        getEditLog().logOpenFile(src, file, false, logRetryCache);
+      }
     }
     return ret;
   }
@@ -2759,9 +2751,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   LastBlockWithStatus appendFile(String src, String holder,
       String clientMachine, EnumSet<CreateFlag> flag, boolean logRetryCache)
       throws IOException {
+    boolean newBlock = flag.contains(CreateFlag.NEW_BLOCK);
+    if (newBlock) {
+      requireEffectiveLayoutVersionForFeature(Feature.APPEND_NEW_BLOCK);
+    }
     try {
-      return appendFileInt(src, holder, clientMachine,
-          flag.contains(CreateFlag.NEW_BLOCK), logRetryCache);
+      return appendFileInt(src, holder, clientMachine, newBlock, logRetryCache);
     } catch (AccessControlException e) {
       logAuditEvent(false, "append", src);
       throw e;
@@ -3365,6 +3360,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    */
   void setQuota(String src, long nsQuota, long ssQuota, StorageType type)
       throws IOException {
+    if (type != null) {
+      requireEffectiveLayoutVersionForFeature(Feature.QUOTA_BY_STORAGE_TYPE);
+    }
     checkOperation(OperationCategory.WRITE);
     writeLock();
     boolean success = false;
@@ -5242,7 +5240,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       if (Server.isRpcInvocation()) {
         LOG.info("Roll Edit Log from " + Server.getRemoteAddress());
       }
-      return getFSImage().rollEditLog();
+      return getFSImage().rollEditLog(getEffectiveLayoutVersion());
     } finally {
       writeUnlock();
     }
@@ -5258,7 +5256,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       
       LOG.info("Start checkpoint for " + backupNode.getAddress());
       NamenodeCommand cmd = getFSImage().startCheckpoint(backupNode,
-          activeNamenode);
+          activeNamenode, getEffectiveLayoutVersion());
       getEditLog().logSync();
       return cmd;
     } finally {
@@ -6882,7 +6880,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       getEditLog().logStartRollingUpgrade(rollingUpgradeInfo.getStartTime());
       if (haEnabled) {
         // roll the edit log to make sure the standby NameNode can tail
-        getFSImage().rollEditLog();
+        getFSImage().rollEditLog(getEffectiveLayoutVersion());
       }
     } finally {
       writeUnlock();
@@ -6985,6 +6983,60 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     return rollingUpgradeInfo != null && !rollingUpgradeInfo.isFinalized();
   }
 
+  /**
+   * Returns the layout version in effect.  Under normal operation, this is the
+   * same as the software's current layout version, defined in
+   * {@link NameNodeLayoutVersion#CURRENT_LAYOUT_VERSION}.  During a rolling
+   * upgrade, this can retain the layout version that was persisted to metadata
+   * prior to starting the rolling upgrade, back to a lower bound defined in
+   * {@link NameNodeLayoutVersion#MINIMUM_COMPATIBLE_LAYOUT_VERSION}.  New
+   * fsimage files and edit log segments will continue to be written with this
+   * older layout version, so that the files are still readable by the old
+   * software version if the admin chooses to downgrade.
+   *
+   * @return layout version in effect
+   */
+  public int getEffectiveLayoutVersion() {
+    if (isRollingUpgrade()) {
+      int storageLV = fsImage.getStorage().getLayoutVersion();
+      if (storageLV >=
+          NameNodeLayoutVersion.MINIMUM_COMPATIBLE_LAYOUT_VERSION) {
+        // The prior layout version satisfies the minimum compatible layout
+        // version of the current software.  Keep reporting the prior layout
+        // as the effective one.  Downgrade is possible.
+        return storageLV;
+      }
+    }
+    // The current software cannot satisfy the layout version of the prior
+    // software.  Proceed with using the current layout version.
+    return NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;
+  }
+
+  /**
+   * Performs a pre-condition check that the layout version in effect is
+   * sufficient to support the requested {@link Feature}.  If not, then the
+   * method throws {@link HadoopIllegalArgumentException} to deny the operation.
+   * This exception class is registered as a terse exception, so it prevents
+   * verbose stack traces in the NameNode log.  During a rolling upgrade, this
+   * method is used to restrict usage of new features.  This prevents writing
+   * new edit log operations that would be unreadable by the old software
+   * version if the admin chooses to downgrade.
+   *
+   * @param f feature to check
+   * @throws HadoopIllegalArgumentException if the current layout version in
+   *     effect is insufficient to support the feature
+   */
+  private void requireEffectiveLayoutVersionForFeature(Feature f)
+      throws HadoopIllegalArgumentException {
+    int lv = getEffectiveLayoutVersion();
+    if (!NameNodeLayoutVersion.supports(f, lv)) {
+      throw new HadoopIllegalArgumentException(String.format(
+          "Feature %s unsupported at NameNode layout version %d.  If a " +
+          "rolling upgrade is in progress, then it must be finalized before " +
+          "using this feature.", f, lv));
+    }
+  }
+
   void checkRollingUpgrade(String action) throws RollingUpgradeException {
     if (isRollingUpgrade()) {
       throw new RollingUpgradeException("Failed to " + action
@@ -7008,7 +7060,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       getEditLog().logFinalizeRollingUpgrade(rollingUpgradeInfo.getFinalizeTime());
       if (haEnabled) {
         // roll the edit log to make sure the standby NameNode can tail
-        getFSImage().rollEditLog();
+        getFSImage().rollEditLog(getEffectiveLayoutVersion());
       }
       getFSImage().updateStorageVersion();
       getFSImage().renameCheckpoint(NameNodeFile.IMAGE_ROLLBACK,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcf4319c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 1c1032b..268abeb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -1157,7 +1157,8 @@ public class NameNode implements NameNodeStatusMXBean {
             LOG.trace("copying op: " + op);
           }
           if (!segmentOpen) {
-            newSharedEditLog.startLogSegment(op.txid, false);
+            newSharedEditLog.startLogSegment(op.txid, false,
+                fsns.getEffectiveLayoutVersion());
             segmentOpen = true;
           }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcf4319c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
index d235e2b..3a5dc12 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
@@ -35,6 +35,8 @@ public class NameNodeLayoutVersion {
 
   public static final int CURRENT_LAYOUT_VERSION
       = LayoutVersion.getCurrentLayoutVersion(Feature.values());
+  public static final int MINIMUM_COMPATIBLE_LAYOUT_VERSION
+      = LayoutVersion.getMinimumCompatibleLayoutVersion(Feature.values());
 
   static {
     LayoutVersion.updateMap(FEATURES, LayoutVersion.Feature.values());
@@ -60,43 +62,59 @@ public class NameNodeLayoutVersion {
    * its immediate predecessor, use the constructor where a specific ancestor
    * can be passed.
    * </li>
+   * <li>Specify a minimum compatible layout version.  The minimum compatible
+   * layout version is the earliest prior version to which a downgrade is
+   * possible after initiating rolling upgrade.  If the feature cannot satisfy
+   * compatibility with any prior version, then set its minimum compatible
+   * layout version to itself to indicate that downgrade is impossible.
+   * Satisfying compatibility might require adding logic to the new feature to
+   * reject operations or handle them differently while rolling upgrade is in
+   * progress.  In general, it's possible to satisfy compatibility for downgrade
+   * if the new feature just involves adding new edit log ops.  Deeper
+   * structural changes, such as changing the way we place files in the metadata
+   * directories, might be incompatible.  Feature implementations should strive
+   * for compatibility, because it's in the best interest of our users to
+   * support downgrade.
    * </ul>
    */
   public static enum Feature implements LayoutFeature {
-    ROLLING_UPGRADE(-55, -53, "Support rolling upgrade", false),
-    EDITLOG_LENGTH(-56, "Add length field to every edit log op"),
-    XATTRS(-57, "Extended attributes"),
-    CREATE_OVERWRITE(-58, "Use single editlog record for " +
+    ROLLING_UPGRADE(-55, -53, -55, "Support rolling upgrade", false),
+    EDITLOG_LENGTH(-56, -56, "Add length field to every edit log op"),
+    XATTRS(-57, -57, "Extended attributes"),
+    CREATE_OVERWRITE(-58, -58, "Use single editlog record for " +
       "creating file with overwrite"),
-    XATTRS_NAMESPACE_EXT(-59, "Increase number of xattr namespaces"),
-    BLOCK_STORAGE_POLICY(-60, "Block Storage policy"),
-    TRUNCATE(-61, "Truncate"),
-    APPEND_NEW_BLOCK(-62, "Support appending to new block"),
-    QUOTA_BY_STORAGE_TYPE(-63, "Support quota for specific storage types");
+    XATTRS_NAMESPACE_EXT(-59, -59, "Increase number of xattr namespaces"),
+    BLOCK_STORAGE_POLICY(-60, -60, "Block Storage policy"),
+    TRUNCATE(-61, -61, "Truncate"),
+    APPEND_NEW_BLOCK(-62, -61, "Support appending to new block"),
+    QUOTA_BY_STORAGE_TYPE(-63, -61, "Support quota for specific storage types");
 
     private final FeatureInfo info;
 
     /**
      * Feature that is added at layout version {@code lv} - 1. 
      * @param lv new layout version with the addition of this feature
+     * @param minCompatLV minimum compatible layout version
      * @param description description of the feature
      */
-    Feature(final int lv, final String description) {
-      this(lv, lv + 1, description, false);
+    Feature(final int lv, int minCompatLV, final String description) {
+      this(lv, lv + 1, minCompatLV, description, false);
     }
 
     /**
     * NameNode feature that is added at layout version {@code ancestorLV}.
      * @param lv new layout version with the addition of this feature
      * @param ancestorLV layout version from which the new lv is derived from.
+     * @param minCompatLV minimum compatible layout version
      * @param description description of the feature
      * @param reserved true when this is a layout version reserved for previous
      *        versions
      * @param features set of features that are to be enabled for this version
      */
-    Feature(final int lv, final int ancestorLV, final String description,
-        boolean reserved, Feature... features) {
-      info = new FeatureInfo(lv, ancestorLV, description, reserved, features);
+    Feature(final int lv, final int ancestorLV, int minCompatLV,
+        final String description, boolean reserved, Feature... features) {
+      info = new FeatureInfo(lv, ancestorLV, minCompatLV, description, reserved,
+          features);
     }
     
     @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcf4319c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index 0fa1cd5..2267853 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -1084,6 +1084,8 @@ public class SecondaryNameNode implements Runnable,
     Checkpointer.rollForwardByApplyingLogs(manifest, dstImage, dstNamesystem);
     // The following has the side effect of purging old fsimages/edit logs.
     dstImage.saveFSImageInAllDirs(dstNamesystem, dstImage.getLastAppliedTxId());
-    dstStorage.writeAll();
+    if (!dstNamesystem.isRollingUpgrade()) {
+      dstStorage.writeAll();
+    }
   }
 }
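
Putting the pieces of this commit together: during a rolling upgrade the
effective layout version can lag CURRENT_LAYOUT_VERSION, and feature-gated
operations are rejected with a terse HadoopIllegalArgumentException so that
no edits unreadable by the old software are written. A rough sketch of that
check, with an illustrative effective layout version of -60 (one older than
TRUNCATE's -61 in the enum above):

    import org.apache.hadoop.HadoopIllegalArgumentException;
    import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
    import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion.Feature;

    public class FeatureGateDemo {
      public static void main(String[] args) {
        // Pretend the metadata on disk predates TRUNCATE (-61).
        int effectiveLV = -60;
        Feature f = Feature.TRUNCATE;
        if (!NameNodeLayoutVersion.supports(f, effectiveLV)) {
          // Mirrors requireEffectiveLayoutVersionForFeature() above.
          throw new HadoopIllegalArgumentException(String.format(
              "Feature %s unsupported at NameNode layout version %d.  If a " +
              "rolling upgrade is in progress, then it must be finalized " +
              "before using this feature.", f, effectiveLV));
        }
      }
    }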

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcf4319c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
index bb59a17..9f8aef5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
@@ -20,7 +20,13 @@ package org.apache.hadoop.hdfs.protocol;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.*;
 
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 import java.util.SortedSet;
 
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
@@ -103,7 +109,100 @@ public class TestLayoutVersion {
     assertEquals(LAST_COMMON_FEATURE.getInfo().getLayoutVersion() - 1,
         first.getInfo().getLayoutVersion());
   }
-  
+
+  /**
+   * Tests expected values for minimum compatible layout version in NameNode
+   * features.  TRUNCATE, APPEND_NEW_BLOCK and QUOTA_BY_STORAGE_TYPE are all
+   * features that launched in the same release.  TRUNCATE was added first, so
+   * we expect all 3 features to have a minimum compatible layout version equal
+   * to TRUNCATE's layout version.  All features older than that existed prior
+   * to the concept of a minimum compatible layout version, so for each one, the
+   * minimum compatible layout version must be equal to itself.
+   */
+  @Test
+  public void testNameNodeFeatureMinimumCompatibleLayoutVersions() {
+    int baseLV = NameNodeLayoutVersion.Feature.TRUNCATE.getInfo()
+        .getLayoutVersion();
+    EnumSet<NameNodeLayoutVersion.Feature> compatibleFeatures = EnumSet.of(
+        NameNodeLayoutVersion.Feature.TRUNCATE,
+        NameNodeLayoutVersion.Feature.APPEND_NEW_BLOCK,
+        NameNodeLayoutVersion.Feature.QUOTA_BY_STORAGE_TYPE);
+    for (LayoutFeature f : compatibleFeatures) {
+      assertEquals(String.format("Expected minimum compatible layout version " +
+          "%d for feature %s.", baseLV, f), baseLV,
+          f.getInfo().getMinimumCompatibleLayoutVersion());
+    }
+    List<LayoutFeature> features = new ArrayList<>();
+    features.addAll(EnumSet.allOf(LayoutVersion.Feature.class));
+    features.addAll(EnumSet.allOf(NameNodeLayoutVersion.Feature.class));
+    for (LayoutFeature f : features) {
+      if (!compatibleFeatures.contains(f)) {
+        assertEquals(String.format("Expected feature %s to have minimum " +
+            "compatible layout version set to itself.", f),
+            f.getInfo().getLayoutVersion(),
+            f.getInfo().getMinimumCompatibleLayoutVersion());
+      }
+    }
+  }
+
+  /**
+   * Tests that NameNode features are listed in order of minimum compatible
+   * layout version.  It would be inconsistent to have features listed out of
+   * order with respect to minimum compatible layout version, because it would
+   * imply going back in time to change compatibility logic in a software release
+   * that had already shipped.
+   */
+  @Test
+  public void testNameNodeFeatureMinimumCompatibleLayoutVersionAscending() {
+    LayoutFeature prevF = null;
+    for (LayoutFeature f : EnumSet.allOf(NameNodeLayoutVersion.Feature.class)) {
+      if (prevF != null) {
+        assertTrue(String.format("Features %s and %s not listed in order of " +
+            "minimum compatible layout version.", prevF, f),
+            f.getInfo().getMinimumCompatibleLayoutVersion() <=
+            prevF.getInfo().getMinimumCompatibleLayoutVersion());
+      }
+      prevF = f;
+    }
+  }
+
+  /**
+   * Tests that attempting to add a new NameNode feature out of order with
+   * respect to minimum compatible layout version will fail fast.
+   */
+  @Test(expected=AssertionError.class)
+  public void testNameNodeFeatureMinimumCompatibleLayoutVersionOutOfOrder() {
+    FeatureInfo ancestorF = LayoutVersion.Feature.RESERVED_REL2_4_0.getInfo();
+    LayoutFeature f = mock(LayoutFeature.class);
+    when(f.getInfo()).thenReturn(new FeatureInfo(
+        ancestorF.getLayoutVersion() - 1, ancestorF.getLayoutVersion(),
+        ancestorF.getMinimumCompatibleLayoutVersion() + 1, "Invalid feature.",
+        false));
+    Map<Integer, SortedSet<LayoutFeature>> features = new HashMap<>();
+    LayoutVersion.updateMap(features, LayoutVersion.Feature.values());
+    LayoutVersion.updateMap(features, new LayoutFeature[] { f });
+  }
+
+  /**
+   * Asserts the current minimum compatible layout version of the software, if a
+   * release were created from the codebase right now.  This test is meant to
+   * make developers stop and reconsider if they introduce a change that requires
+   * a new minimum compatible layout version.  This would make downgrade
+   * impossible.
+   */
+  @Test
+  public void testCurrentMinimumCompatibleLayoutVersion() {
+    int expectedMinCompatLV = NameNodeLayoutVersion.Feature.TRUNCATE.getInfo()
+        .getLayoutVersion();
+    int actualMinCompatLV = LayoutVersion.getMinimumCompatibleLayoutVersion(
+        NameNodeLayoutVersion.Feature.values());
+    assertEquals("The minimum compatible layout version has changed.  " +
+        "Downgrade to prior versions is no longer possible.  Please either " +
+        "restore compatibility, or if the incompatibility is intentional, " +
+        "then update this assertion.", expectedMinCompatLV, actualMinCompatLV);
+  }
+
   /**
    * Given feature {@code f}, ensures the layout version of that feature
   * supports all the features supported by its ancestor.
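
For reference, the tests above exercise two FeatureInfo accessors: getLayoutVersion() and getMinimumCompatibleLayoutVersion(). A hedged sketch of how the aggregate value checked by testCurrentMinimumCompatibleLayoutVersion can be computed; this is not the actual LayoutVersion code, and the stand-in types below are illustrative only:

    // HDFS layout versions are negative and decrease as features are added,
    // so the aggregate minimum compatible layout version over a feature list
    // is the most negative per-feature value.
    interface FeatureInfoView {              // stand-in for FeatureInfo
      int getLayoutVersion();
      int getMinimumCompatibleLayoutVersion();
    }

    interface LayoutFeatureView {            // stand-in for LayoutFeature
      FeatureInfoView getInfo();
    }

    final class MinCompatSketch {
      static int getMinimumCompatibleLayoutVersion(LayoutFeatureView[] features) {
        int minCompat = 0;
        for (LayoutFeatureView f : features) {
          minCompat = Math.min(minCompat,
              f.getInfo().getMinimumCompatibleLayoutVersion());
        }
        return minCompat;
      }
    }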

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcf4319c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
index 8a606f5..733dd71 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
@@ -203,7 +203,7 @@ public class CreateEditsLog {
 
     FileNameGenerator nameGenerator = new FileNameGenerator(BASE_PATH, 100);
     FSEditLog editLog = FSImageTestUtil.createStandaloneEditLog(editsLogDir);
-    editLog.openForWrite();
+    editLog.openForWrite(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     addFiles(editLog, numFiles, replication, numBlocksPerFile, startingBlockId,
              blockSize, nameGenerator);
     editLog.logSync();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcf4319c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
index 445c1e1..68eff19 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
@@ -213,7 +213,7 @@ public abstract class FSImageTestUtil {
       long firstTxId, long newInodeId) throws IOException {
     FSEditLog editLog = FSImageTestUtil.createStandaloneEditLog(editsLogDir);
     editLog.setNextTxId(firstTxId);
-    editLog.openForWrite();
+    editLog.openForWrite(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     
     PermissionStatus perms = PermissionStatus.createImmutable("fakeuser", "fakegroup",
         FsPermission.createImmutable((short)0755));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcf4319c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
index f654107..68d008f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
@@ -297,7 +297,7 @@ public class TestEditLog {
       editLog.logSetReplication("fakefile", (short) 1);
       editLog.logSync();
       
-      editLog.rollEditLog();
+      editLog.rollEditLog(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
 
       assertExistsInStorageDirs(
           cluster, NameNodeDirType.EDITS,
@@ -370,7 +370,7 @@ public class TestEditLog {
       
       // Roll log so new output buffer size takes effect
       // we should now be writing to edits_inprogress_3
-      fsimage.rollEditLog();
+      fsimage.rollEditLog(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     
       // Remember the current lastInodeId and will reset it back to test
       // loading editlog segments.The transactions in the following allocate new
@@ -401,7 +401,7 @@ public class TestEditLog {
       trans.run();
 
       // Roll another time to finalize edits_inprogress_3
-      fsimage.rollEditLog();
+      fsimage.rollEditLog(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
       
       long expectedTxns = ((NUM_THREADS+1) * 2 * NUM_TRANSACTIONS) + 2; // +2 for start/end txns
    
@@ -940,7 +940,7 @@ public class TestEditLog {
     FSEditLog log = FSImageTestUtil.createStandaloneEditLog(logDir);
     try {
       FileUtil.setWritable(logDir, false);
-      log.openForWrite();
+      log.openForWrite(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
       fail("Did no throw exception on only having a bad dir");
     } catch (IOException ioe) {
       GenericTestUtils.assertExceptionContains(
@@ -965,7 +965,7 @@ public class TestEditLog {
         new byte[500]);
     
     try {
-      log.openForWrite();
+      log.openForWrite(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
       NameNodeMetrics mockMetrics = Mockito.mock(NameNodeMetrics.class);
       log.setMetricsForTests(mockMetrics);
 
@@ -1139,7 +1139,7 @@ public class TestEditLog {
     // logGenerationStamp is used, simply because it doesn't 
     // require complex arguments.
     editlog.initJournalsForWrite();
-    editlog.openForWrite();
+    editlog.openForWrite(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     for (int i = 2; i < TXNS_PER_ROLL; i++) {
       editlog.logGenerationStampV2((long) 0);
     }
@@ -1151,7 +1151,7 @@ public class TestEditLog {
     // the specified journal is aborted. It will be brought
     // back into rotation automatically by rollEditLog
     for (int i = 0; i < numrolls; i++) {
-      editlog.rollEditLog();
+      editlog.rollEditLog(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
       
       editlog.logGenerationStampV2((long) i);
       editlog.logSync();
@@ -1485,7 +1485,7 @@ public class TestEditLog {
             cluster, NameNodeDirType.EDITS,
             NNStorage.getInProgressEditsFileName((i * 3) + 1));
         editLog.logSync();
-        editLog.rollEditLog();
+        editLog.rollEditLog(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
         assertExistsInStorageDirs(
             cluster, NameNodeDirType.EDITS,
             NNStorage.getFinalizedEditsFileName((i * 3) + 1, (i * 3) + 3));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcf4319c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
index bc55d12..55ba379 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
@@ -285,7 +285,7 @@ public class TestFSEditLogLoader {
       // FSEditLog#endCurrentLogSegment.  For testing purposes, we
       // disable that here.
       doNothing().when(spyLog).endCurrentLogSegment(true);
-      spyLog.openForWrite();
+      spyLog.openForWrite(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
       assertTrue("should exist: " + inProgressFile, inProgressFile.exists());
       
       for (int i = 0; i < numTx; i++) {
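
These one-line test updates all track the same signature change: FSEditLog#openForWrite and the rollEditLog paths now take the layout version that the next edit log segment should be written with, instead of implicitly using the current one. A hedged sketch of the updated calling convention; the tie-in with the minimum compatible layout version work above is an inference, not stated in these hunks:

    // Sketch lives in the same package as the tests above, since several of
    // these FSEditLog entry points are package-scoped.
    package org.apache.hadoop.hdfs.server.namenode;

    import java.io.File;

    class EditLogVersionSketch {
      static void openAndRoll(File editsLogDir) throws Exception {
        FSEditLog editLog = FSImageTestUtil.createStandaloneEditLog(editsLogDir);
        editLog.initJournalsForWrite();
        // Opening and rolling now pin the segment to an explicit layout version.
        editLog.openForWrite(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
        editLog.logSync();
        editLog.rollEditLog(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
      }
    }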


[26/50] hadoop git commit: HDFS-8532. Make the visibility of DFSOutputStream#streamer member variable to private. Contributed by Rakesh R.

Posted by zj...@apache.org.
HDFS-8532. Make the visibility of DFSOutputStream#streamer member variable to private. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d9ee232e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d9ee232e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d9ee232e

Branch: refs/heads/YARN-2928
Commit: d9ee232e895222fef7c1cb6b6e4158246e5ebc6f
Parents: e72a346
Author: Andrew Wang <wa...@apache.org>
Authored: Thu Jun 4 11:09:19 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:56:57 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                       | 3 +++
 .../src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java     | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9ee232e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index bb65105..181f52b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -597,6 +597,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8513. Rename BlockPlacementPolicyRackFaultTolarent to
     BlockPlacementPolicyRackFaultTolerant. (wang)
 
+    HDFS-8532. Make the visibility of DFSOutputStream#streamer member variable
+    to private. (Rakesh R via wang)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9ee232e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 1dc4a9f..695e6da 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -107,7 +107,7 @@ public class DFSOutputStream extends FSOutputSummer
   protected final int bytesPerChecksum;
 
   protected DFSPacket currentPacket = null;
-  protected DataStreamer streamer;
+  private DataStreamer streamer;
   protected int packetSize = 0; // write packet size, not including the header.
   protected int chunksPerPacket = 0;
   protected long lastFlushOffset = 0; // offset when flush was invoked
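
With the field private, code elsewhere in the class hierarchy has to reach the streamer through accessors. A minimal sketch of the encapsulation pattern; getStreamer()/setStreamer() are assumed names for illustration, not necessarily what the patch adds:

    public class OutputStreamSketch {
      static class DataStreamer { }          // stand-in for the real streamer type

      private DataStreamer streamer;         // was protected; now encapsulated

      // Assumed accessor pair (illustrative only).
      protected DataStreamer getStreamer() {
        return streamer;
      }

      protected void setStreamer(DataStreamer streamer) {
        this.streamer = streamer;
      }
    }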


[04/50] hadoop git commit: HDFS-8521. Add VisibleForTesting annotation to BlockPoolSlice#selectReplicaToDelete. (cmccabe)

Posted by zj...@apache.org.
HDFS-8521. Add VisibleForTesting annotation to BlockPoolSlice#selectReplicaToDelete. (cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fb3037e6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fb3037e6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fb3037e6

Branch: refs/heads/YARN-2928
Commit: fb3037e645371a8cf3ba88644203f584dea6e41d
Parents: 0f407fc
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Tue Jun 2 20:06:28 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:43:12 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                       | 3 +++
 .../hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java       | 3 +++
 2 files changed, 6 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb3037e6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2ce54c4..abf6452 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -862,6 +862,9 @@ Release 2.7.1 - UNRELEASED
     HDFS-8213. DFSClient should use hdfs.client.htrace HTrace configuration
     prefix rather than hadoop.htrace (cmccabe)
 
+    HDFS-8521. Add VisibleForTesting annotation to
+    BlockPoolSlice#selectReplicaToDelete. (cmccabe)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb3037e6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
index 94aaf21..d1f7c5f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
@@ -57,7 +57,9 @@ import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.hadoop.util.Time;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.io.Files;
+
 /**
  * A block pool slice represents a portion of a block pool stored on a volume.  
  * Taken together, all BlockPoolSlices sharing a block pool ID across a 
@@ -562,6 +564,7 @@ class BlockPoolSlice {
     return replicaToKeep;
   }
 
+  @VisibleForTesting
   static ReplicaInfo selectReplicaToDelete(final ReplicaInfo replica1,
       final ReplicaInfo replica2) {
     ReplicaInfo replicaToKeep;
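
The @VisibleForTesting annotation documents that the method's package-private (rather than private) scope exists only so tests in the same package can invoke it directly; it adds no runtime behavior. A minimal generic sketch of the contract, with an illustrative method that is not from BlockPoolSlice:

    import com.google.common.annotations.VisibleForTesting;

    public class VisibilitySketch {
      // Package-private solely so same-package tests can call it; production
      // callers should treat it as private.
      @VisibleForTesting
      static int pickSmaller(int a, int b) {
        return Math.min(a, b);
      }
    }

With this patch, a test in org.apache.hadoop.hdfs.server.datanode.fsdataset.impl can likewise call BlockPoolSlice.selectReplicaToDelete(replica1, replica2) directly.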


[31/50] hadoop git commit: HDFS-8535. Clarify that dfs usage in dfsadmin -report output includes all block replicas. Contributed by Eddy Xu.

Posted by zj...@apache.org.
HDFS-8535. Clarify that dfs usage in dfsadmin -report output includes all block replicas. Contributed by Eddy Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/42ba35bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/42ba35bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/42ba35bd

Branch: refs/heads/YARN-2928
Commit: 42ba35bdde24d2ec521fccd355fad9e02cddf57c
Parents: e8bed30
Author: Andrew Wang <wa...@apache.org>
Authored: Thu Jun 4 15:35:07 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:56:58 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                    | 3 +++
 .../src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java   | 6 ++++--
 .../hadoop-hdfs/src/site/markdown/HDFSCommands.md              | 2 +-
 3 files changed, 8 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/42ba35bd/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 181f52b..48d8eb3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -600,6 +600,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8532. Make the visibility of DFSOutputStream#streamer member variable
     to private. (Rakesh R via wang)
 
+    HDFS-8535. Clarify that dfs usage in dfsadmin -report output includes all
+    block replicas. (Eddy Xu via wang)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/42ba35bd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 11f2c32..b978189 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -910,9 +910,11 @@ public class DFSAdmin extends FsShell {
       commonUsageSummary;
 
     String report ="-report [-live] [-dead] [-decommissioning]:\n" +
-      "\tReports basic filesystem information and statistics.\n" +
+      "\tReports basic filesystem information and statistics. \n" +
+      "\tThe dfs usage can be different from \"du\" usage, because it\n" +
+      "\tmeasures raw space used by replication, checksums, snapshots\n" +
+      "\tand etc. on all the DNs.\n" +
       "\tOptional flags may be used to filter the list of displayed DNs.\n";
-    
 
     String safemode = "-safemode <enter|leave|get|wait>:  Safe mode maintenance command.\n" + 
       "\t\tSafe mode is a Namenode state in which it\n" +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/42ba35bd/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index 2121958..fab15f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -336,7 +336,7 @@ Usage:
 
 | COMMAND\_OPTION | Description |
 |:---- |:---- |
-| `-report` `[-live]` `[-dead]` `[-decommissioning]` | Reports basic filesystem information and statistics. Optional flags may be used to filter the list of displayed DataNodes. |
+| `-report` `[-live]` `[-dead]` `[-decommissioning]` | Reports basic filesystem information and statistics. The dfs usage can differ from "du" usage, because it measures raw space used by replication, checksums, snapshots, etc. on all the DNs. Optional flags may be used to filter the list of displayed DataNodes. |
 | `-safemode` enter\|leave\|get\|wait | Safe mode maintenance command. Safe mode is a Namenode state in which it <br/>1. does not accept changes to the name space (read-only) <br/>2. does not replicate or delete blocks. <br/>Safe mode is entered automatically at Namenode startup, and leaves safe mode automatically when the configured minimum percentage of blocks satisfies the minimum replication condition. Safe mode can also be entered manually, but then it can only be turned off manually as well. |
 | `-saveNamespace` | Save current namespace into storage directories and reset edits log. Requires safe mode. |
 | `-rollEdits` | Rolls the edit log on the active NameNode. |
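
The raw-versus-logical distinction being documented here is also visible through public FileSystem APIs. A hedged sketch; the path is a placeholder:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.ContentSummary;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FsStatus;
    import org.apache.hadoop.fs.Path;

    public class UsageSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // Raw bytes consumed across all DataNodes: counts every replica,
        // checksum data, etc., which is what dfsadmin -report aggregates.
        FsStatus status = fs.getStatus();
        System.out.println("DFS used (raw): " + status.getUsed());
        // "du"-style view of a subtree: logical file length vs. space
        // consumed after replication.
        ContentSummary cs = fs.getContentSummary(new Path("/"));
        System.out.println("Length (logical): " + cs.getLength());
        System.out.println("Space consumed (with replication): "
            + cs.getSpaceConsumed());
      }
    }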


[28/50] hadoop git commit: HADOOP-11924. Tolerate JDK-8047340-related exceptions in Shell#isSetSidAvailable preventing class init. (Tsuyoshi Ozawa via gera)

Posted by zj...@apache.org.
HADOOP-11924. Tolerate JDK-8047340-related exceptions in Shell#isSetSidAvailable preventing class init. (Tsuyoshi Ozawa via gera)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96a8d01a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96a8d01a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96a8d01a

Branch: refs/heads/YARN-2928
Commit: 96a8d01a380c904c84053c3a106a738f018eb5ff
Parents: b200b88
Author: Gera Shegalov <ge...@apache.org>
Authored: Thu Jun 4 11:38:28 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:56:57 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt          |  3 +++
 .../src/main/java/org/apache/hadoop/util/Shell.java      | 11 ++++++++++-
 .../java/org/apache/hadoop/util/TestStringUtils.java     |  3 ---
 3 files changed, 13 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96a8d01a/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 942d9e9..5f4bdb8 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -823,6 +823,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-11994. smart-apply-patch wrongly assumes that git is infallible.
     (Kengo Seki via Arpit Agarwal)
 
+    HADOOP-11924. Tolerate JDK-8047340-related exceptions in
+    Shell#isSetSidAvailable preventing class init. (Tsuyoshi Ozawa via gera)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96a8d01a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
index f0100d4..c76c921 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
@@ -392,7 +392,16 @@ abstract public class Shell {
     } catch (IOException ioe) {
       LOG.debug("setsid is not available on this machine. So not using it.");
       setsidSupported = false;
-    } finally { // handle the exit code
+    } catch (Error err) {
+      if (err.getMessage().contains("posix_spawn is not " +
+          "a supported process launch mechanism")
+          && (Shell.FREEBSD || Shell.MAC)) {
+        // HADOOP-11924: This is a workaround to avoid failure of class init
+        // by a JDK issue on the TR locale (JDK-8047340).
+        LOG.info("Avoiding JDK-8047340 on BSD-based systems.", err);
+        setsidSupported = false;
+      }
+    } finally { // handle the exit code
       if (LOG.isDebugEnabled()) {
         LOG.debug("setsid exited with exit code "
                  + (shexec != null ? shexec.getExitCode() : "(null executor)"));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96a8d01a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
index 5b0715f..85ab8c4 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
@@ -417,9 +417,6 @@ public class TestStringUtils extends UnitTestcaseTimeLimit {
 
   @Test
   public void testLowerAndUpperStrings() {
-    // Due to java bug http://bugs.java.com/bugdatabase/view_bug.do?bug_id=8047340,
-    // The test will fail with Turkish locality on Mac OS.
-    Assume.assumeTrue(Shell.LINUX);
     Locale defaultLocale = Locale.getDefault();
     try {
       Locale.setDefault(new Locale("tr", "TR"));
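
The removed Assume guard existed because of the well-known Turkish-locale casing rules: with a tr_TR default locale, upper-case 'I' lower-cases to dotless 'ı' (U+0131) rather than 'i'. A hedged sketch of the underlying behavior and the usual locale-insensitive fix:

    import java.util.Locale;

    public class TurkishLocaleSketch {
      public static void main(String[] args) {
        // Locale-sensitive: prints "tıtle" under the Turkish locale.
        System.out.println("TITLE".toLowerCase(new Locale("tr", "TR")));
        // Locale-insensitive: prints "title" regardless of the default
        // locale, which is why utilities avoid the no-argument overload.
        System.out.println("TITLE".toLowerCase(Locale.ROOT));
      }
    }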


[09/50] hadoop git commit: HDFS-3716. Purger should remove stale fsimage ckpt files (Contributed by J.Andreina)

Posted by zj...@apache.org.
HDFS-3716. Purger should remove stale fsimage ckpt files (Contributed by J.Andreina)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dadcb31e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dadcb31e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dadcb31e

Branch: refs/heads/YARN-2928
Commit: dadcb31eba92de47316bb7b3f0a084caaf8ad906
Parents: 6de6796
Author: Vinayakumar B <vi...@apache.org>
Authored: Wed Jun 3 15:30:40 2015 +0530
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:43:13 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../hadoop/hdfs/server/namenode/FSImage.java    |  1 +
 .../hdfs/server/namenode/TestFSImage.java       | 41 ++++++++++++++++++++
 3 files changed, 45 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dadcb31e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3e25129..d65e513 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -843,6 +843,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8470. fsimage loading progress should update inode, delegation token and
     cache pool count. (surendra singh lilhore via vinayakumarb)
 
+    HDFS-3716. Purger should remove stale fsimage ckpt files
+    (J.Andreina via vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dadcb31e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index 45184e7..cd7cf18 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -1209,6 +1209,7 @@ public class FSImage implements Closeable {
       // Since we now have a new checkpoint, we can clean up some
       // old edit logs and checkpoints.
       purgeOldStorage(nnf);
+      archivalManager.purgeCheckpoints(NameNodeFile.IMAGE_NEW);
     } finally {
       // Notify any threads waiting on the checkpoint to be canceled
       // that it is complete.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dadcb31e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
index 27a1bd3..df20fd6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.File;
@@ -43,6 +44,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
@@ -118,6 +120,45 @@ public class TestFSImage {
     }
   }
 
+  /**
+   * On checkpointing, stale fsimage checkpoint files should be deleted.
+   */
+  @Test
+  public void testRemovalStaleFsimageCkpt() throws IOException {
+    MiniDFSCluster cluster = null;
+    SecondaryNameNode secondary = null;
+    Configuration conf = new HdfsConfiguration();
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).
+          numDataNodes(1).format(true).build();
+      conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
+          "0.0.0.0:0");
+      secondary = new SecondaryNameNode(conf);
+      // Do checkpointing
+      secondary.doCheckpoint();
+      NNStorage storage = secondary.getFSImage().storage;
+      File currentDir = FSImageTestUtil.
+          getCurrentDirs(storage, NameNodeDirType.IMAGE).get(0);
+      // Create a stale fsimage.ckpt file
+      File staleCkptFile = new File(currentDir.getPath() +
+          "/fsimage.ckpt_0000000000000000002");
+      staleCkptFile.createNewFile();
+      assertTrue(staleCkptFile.exists());
+      // After the checkpoint, the stale fsimage.ckpt file should be deleted
+      secondary.doCheckpoint();
+      assertFalse(staleCkptFile.exists());
+    } finally {
+      if (secondary != null) {
+        secondary.shutdown();
+        secondary = null;
+      }
+      if (cluster != null) {
+        cluster.shutdown();
+        cluster = null;
+      }
+    }
+  }
+
   /**
   * Ensure that the digest written by the saver equals the digest of the
    * file.
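
Conceptually, the purger call added to FSImage removes fsimage.ckpt_<txid> files left behind by checkpoints that never completed; a finished checkpoint is renamed from fsimage.ckpt_<txid> to fsimage_<txid>. A hedged, standalone sketch of that file matching; the real logic lives behind archivalManager.purgeCheckpoints and is not shown in this hunk:

    import java.io.File;
    import java.util.regex.Pattern;

    public class StaleCkptSketch {
      private static final Pattern CKPT = Pattern.compile("fsimage\\.ckpt_\\d+");

      // By the time the purger runs, a fresh checkpoint has just completed,
      // so any surviving ckpt file in the image directory is stale.
      static void purgeStaleCheckpoints(File currentDir) {
        File[] files = currentDir.listFiles();
        if (files == null) {
          return; // not a directory, or listing failed
        }
        for (File f : files) {
          if (CKPT.matcher(f.getName()).matches()) {
            f.delete();
          }
        }
      }
    }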


[22/50] hadoop git commit: YARN-3764. CapacityScheduler should forbid moving LeafQueue from one parent to another. Contributed by Wangda Tan

Posted by zj...@apache.org.
YARN-3764. CapacityScheduler should forbid moving LeafQueue from one parent to another. Contributed by Wangda Tan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/94db4f21
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/94db4f21
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/94db4f21

Branch: refs/heads/YARN-2928
Commit: 94db4f218b4376ba8547831a371366d409fd9ca1
Parents: 993bf8b
Author: Jian He <ji...@apache.org>
Authored: Thu Jun 4 10:52:07 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:56:56 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 ++
 .../scheduler/capacity/CapacityScheduler.java   | 11 +++++--
 .../scheduler/capacity/TestQueueParsing.java    | 33 ++++++++++++++++++++
 3 files changed, 45 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/94db4f21/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 83aa12f..972066d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -717,6 +717,9 @@ Release 2.7.1 - UNRELEASED
     YARN-3733. Fix DominantRC#compare() does not work as expected if 
     cluster resource is empty. (Rohith Sharmaks via wangda)
 
+    YARN-3764. CapacityScheduler should forbid moving LeafQueue from one parent
+    to another. (Wangda Tan via jianhe)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94db4f21/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 06d282d..f1d0f9c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -551,8 +551,15 @@ public class CapacityScheduler extends
     // check that all static queues are included in the newQueues list
     for (Map.Entry<String, CSQueue> e : queues.entrySet()) {
       if (!(e.getValue() instanceof ReservationQueue)) {
-        if (!newQueues.containsKey(e.getKey())) {
-          throw new IOException(e.getKey() + " cannot be found during refresh!");
+        String queueName = e.getKey();
+        CSQueue oldQueue = e.getValue();
+        CSQueue newQueue = newQueues.get(queueName); 
+        if (null == newQueue) {
+          throw new IOException(queueName + " cannot be found during refresh!");
+        } else if (!oldQueue.getQueuePath().equals(newQueue.getQueuePath())) {
+          throw new IOException(queueName + " is moved from:"
+              + oldQueue.getQueuePath() + " to:" + newQueue.getQueuePath()
+              + " after refresh, which is not allowed.");
         }
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94db4f21/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java
index 8d04700..198bd4a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java
@@ -865,4 +865,37 @@ public class TestQueueParsing {
     capacityScheduler.start();
     ServiceOperations.stopQuietly(capacityScheduler);
   }
+  
+  @Test(expected = IOException.class)
+  public void testQueueParsingWithMoveQueue()
+      throws IOException {
+    YarnConfiguration conf = new YarnConfiguration();
+    CapacitySchedulerConfiguration csConf =
+        new CapacitySchedulerConfiguration(conf);
+    csConf.setQueues("root", new String[] { "a" });
+    csConf.setQueues("root.a", new String[] { "x", "y" });
+    csConf.setCapacity("root.a", 100);
+    csConf.setCapacity("root.a.x", 50);
+    csConf.setCapacity("root.a.y", 50);
+
+    CapacityScheduler capacityScheduler = new CapacityScheduler();
+    RMContextImpl rmContext =
+        new RMContextImpl(null, null, null, null, null, null,
+            new RMContainerTokenSecretManager(csConf),
+            new NMTokenSecretManagerInRM(csConf),
+            new ClientToAMTokenSecretManagerInRM(), null);
+    rmContext.setNodeLabelManager(nodeLabelManager);
+    capacityScheduler.setConf(csConf);
+    capacityScheduler.setRMContext(rmContext);
+    capacityScheduler.init(csConf);
+    capacityScheduler.start();
+    
+    csConf.setQueues("root", new String[] { "a", "x" });
+    csConf.setQueues("root.a", new String[] { "y" });
+    csConf.setCapacity("root.x", 50);
+    csConf.setCapacity("root.a", 50);
+    csConf.setCapacity("root.a.y", 100);
+    
+    capacityScheduler.reinitialize(csConf, rmContext);
+  }
 }
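
The refresh-time validation compares full queue paths, so a configuration that re-parents a queue is rejected even when the leaf name is unchanged. A standalone distillation of the check; string literals stand in for CSQueue#getQueuePath() values:

    import java.io.IOException;

    public class QueueMoveCheckSketch {
      static void validate(String queueName, String oldPath, String newPath)
          throws IOException {
        if (!oldPath.equals(newPath)) {
          throw new IOException(queueName + " is moved from:" + oldPath
              + " to:" + newPath + " after refresh, which is not allowed.");
        }
      }

      public static void main(String[] args) throws IOException {
        // Mirrors the test above: "x" moves from root.a.x to root.x.
        validate("x", "root.a.x", "root.x"); // throws IOException
      }
    }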


[30/50] hadoop git commit: YARN-3766. Fixed the apps table column error of generic history web UI. Contributed by Xuan Gong.

Posted by zj...@apache.org.
YARN-3766. Fixed the apps table column error of generic history web UI. Contributed by Xuan Gong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e8bed307
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e8bed307
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e8bed307

Branch: refs/heads/YARN-2928
Commit: e8bed3071909d5102f45b5e6ea9bb37f92b06fc7
Parents: eba031e
Author: Zhijie Shen <zj...@apache.org>
Authored: Thu Jun 4 14:46:32 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:56:58 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                                  | 3 +++
 .../yarn/server/applicationhistoryservice/webapp/AHSView.java    | 2 +-
 .../java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java  | 4 ++++
 3 files changed, 8 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8bed307/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1c36c9b..69efca4 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -581,6 +581,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3749. We should make a copy of configuration when init MiniYARNCluster
     with multiple RMs. (Chun Chen via xgong)
 
+    YARN-3766. Fixed the apps table column error of generic history web UI.
+    (Xuan Gong via zjshen)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8bed307/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java
index 152364e..65b5ac1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java
@@ -40,7 +40,7 @@ public class AHSView extends TwoColumnLayout {
   protected void preHead(Page.HTML<_> html) {
     commonPreHead(html);
     set(DATATABLES_ID, "apps");
-    set(initID(DATATABLES, "apps"), WebPageUtils.appsTableInit());
+    set(initID(DATATABLES, "apps"), WebPageUtils.appsTableInit(false));
     setTableStyles(html, "apps", ".queue {width:6em}", ".ui {width:8em}");
 
     // Set the correct title.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8bed307/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
index ed0fe38..df63b77 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
@@ -27,6 +27,10 @@ public class WebPageUtils {
     return appsTableInit(false, true);
   }
 
+  public static String appsTableInit(boolean isResourceManager) {
+    return appsTableInit(false, isResourceManager);
+  }
+
   public static String appsTableInit(
       boolean isFairSchedulerPage, boolean isResourceManager) {
     // id, user, name, queue, starttime, finishtime, state, status, progress, ui
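
For context: the no-argument appsTableInit() at the top of this hunk resolves to appsTableInit(false, true), i.e. the ResourceManager column layout, which is why the history server's table columns were off. The new single-argument overload lets AHSView state isResourceManager=false explicitly; both call sites below are from this diff, with the reading of the boolean inferred from its name:

    // Before (resolved as isResourceManager=true, the RM layout):
    set(initID(DATATABLES, "apps"), WebPageUtils.appsTableInit());
    // After (history server opts out of the RM-specific column set):
    set(initID(DATATABLES, "apps"), WebPageUtils.appsTableInit(false));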


[15/50] hadoop git commit: MAPREDUCE-5965. Hadoop streaming throws error if list of input files is high. Error is: "error=7, Argument list too long at if number of input file is high" (wilfreds via rkanter)

Posted by zj...@apache.org.
MAPREDUCE-5965. Hadoop streaming throws error if list of input files is high. Error is: "error=7, Argument list too long at if number of input file is high" (wilfreds via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c4399269
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c4399269
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c4399269

Branch: refs/heads/YARN-2928
Commit: c43992691ad19de62d0ceb95346c6f30c9b267b8
Parents: 8d39b34
Author: Robert Kanter <rk...@apache.org>
Authored: Wed Jun 3 18:41:45 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:43:15 2015 -0700

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt             |  4 ++++
 .../org/apache/hadoop/streaming/PipeMapRed.java  | 19 +++++++++++--------
 .../org/apache/hadoop/streaming/StreamJob.java   |  5 ++++-
 .../src/site/markdown/HadoopStreaming.md.vm      |  9 +++++++++
 4 files changed, 28 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4399269/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 5cc08a3..9fa6c5a 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -467,6 +467,10 @@ Release 2.8.0 - UNRELEASED
     MAPREDUCE-6374. Distributed Cache File visibility should check permission
     of full path (Chang Li via jlowe)
 
+    MAPREDUCE-5965. Hadoop streaming throws error if list of input files is
+    high. Error is: "error=7, Argument list too long at if number of input
+    file is high" (wilfreds via rkanter)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4399269/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapRed.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapRed.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapRed.java
index f47e756..77c7252 100644
--- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapRed.java
+++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapRed.java
@@ -19,8 +19,7 @@
 package org.apache.hadoop.streaming;
 
 import java.io.*;
-import java.util.Map;
-import java.util.Iterator;
+import java.util.Map.Entry;
 import java.util.Arrays;
 import java.util.ArrayList;
 import java.util.Properties;
@@ -238,13 +237,17 @@ public abstract class PipeMapRed {
   void addJobConfToEnvironment(JobConf jobconf, Properties env) {
     JobConf conf = new JobConf(jobconf);
     conf.setDeprecatedProperties();
-    Iterator it = conf.iterator();
-    while (it.hasNext()) {
-      Map.Entry en = (Map.Entry) it.next();
-      String name = (String) en.getKey();
-      //String value = (String)en.getValue(); // does not apply variable expansion
-      String value = conf.get(name); // does variable expansion 
+    int lenLimit = conf.getInt("stream.jobconf.truncate.limit", -1);
+
+    for (Entry<String, String> confEntry: conf) {
+      String name = confEntry.getKey();
+      String value = conf.get(name); // does variable expansion
       name = safeEnvVarName(name);
+      if (lenLimit > -1 && value.length() > lenLimit) {
+        LOG.warn("Environment variable " + name + " truncated to " + lenLimit
+            + " to fit system limits.");
+        value = value.substring(0, lenLimit);
+      }
       envPut(env, name, value);
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4399269/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java
index 7ff5641..118e0fb 100644
--- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java
+++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java
@@ -617,7 +617,10 @@ public class StreamJob implements Tool {
         "/path/my-hadoop-streaming.jar");
     System.out.println("For more details about jobconf parameters see:");
     System.out.println("  http://wiki.apache.org/hadoop/JobConfFile");
-    System.out.println("To set an environement variable in a streaming " +
+    System.out.println("Truncate the values of the job configuration copied" +
+        "to the environment at the given length:");
+    System.out.println("   -D stream.jobconf.truncate.limit=-1");
+    System.out.println("To set an environment variable in a streaming " +
         "command:");
     System.out.println("   -cmdenv EXAMPLE_DIR=/home/example/dictionaries/");
     System.out.println();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4399269/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm b/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm
index 7f2412e..0598a35 100644
--- a/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm
+++ b/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm
@@ -55,6 +55,7 @@ Hadoop Streaming
         * [How do I update counters in streaming applications?](#How_do_I_update_counters_in_streaming_applications)
         * [How do I update status in streaming applications?](#How_do_I_update_status_in_streaming_applications)
         * [How do I get the Job variables in a streaming job's mapper/reducer?](#How_do_I_get_the_Job_variables_in_a_streaming_jobs_mapperreducer)
+        * [What do I do if I get a "error=7, Argument list too long"](#What_do_I_do_if_I_get_a_error_Argument_list_too_long)
 
 Hadoop Streaming
 ----------------
@@ -564,3 +565,11 @@ A streaming process can use the stderr to emit status information. To set a stat
 $H3 How do I get the Job variables in a streaming job's mapper/reducer?
 
 See [Configured Parameters](../hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html#Configured_Parameters). During the execution of a streaming job, the names of the "mapred" parameters are transformed. The dots ( . ) become underscores ( \_ ). For example, mapreduce.job.id becomes mapreduce\_job\_id and mapreduce.job.jar becomes mapreduce\_job\_jar. In your code, use the parameter names with the underscores.
+
+$H3 What do I do if I get a "error=7, Argument list too long"
+
+The job copies the whole configuration to the environment. If the job processes a large number of input files, adding the job configuration to the environment can overrun the environment's size limits. The copy of the job configuration in the environment is not essential for running the job and can be truncated by setting:
+
+  -D stream.jobconf.truncate.limit=20000
+
+By default the values are not truncated (-1). Zero (0) copies only the names and not the values. In almost all cases 20000 is a safe value that prevents overrunning the environment.
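
A hedged end-to-end invocation with the new knob; the input/output paths and the cat/wc commands are the stock streaming placeholders:

    hadoop jar hadoop-streaming.jar \
      -D stream.jobconf.truncate.limit=20000 \
      -input myInputDirs \
      -output myOutputDir \
      -mapper /bin/cat \
      -reducer /usr/bin/wc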


[37/50] hadoop git commit: YARN-1462. AHS API and other AHS changes to handle tags for completed MR jobs. Contributed by Xuan Gong

Posted by zj...@apache.org.
YARN-1462. AHS API and other AHS changes to handle tags for completed MR jobs. Contributed by Xuan Gong


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9d0d5dce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9d0d5dce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9d0d5dce

Branch: refs/heads/YARN-2928
Commit: 9d0d5dcef7a426a4b57d2cc1f37fc2c969036711
Parents: f82a100
Author: Xuan <xg...@apache.org>
Authored: Fri Jun 5 12:48:52 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:56:59 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  2 ++
 .../yarn/api/records/ApplicationReport.java     | 19 ++++++++++
 ...pplicationHistoryManagerOnTimelineStore.java | 19 ++++++++--
 ...pplicationHistoryManagerOnTimelineStore.java |  9 +++++
 .../metrics/ApplicationMetricsConstants.java    |  1 +
 .../metrics/ApplicationCreatedEvent.java        | 10 +++++-
 .../metrics/SystemMetricsPublisher.java         |  4 ++-
 .../metrics/TestSystemMetricsPublisher.java     | 37 ++++++++++++++++++++
 .../src/site/markdown/TimelineServer.md         | 30 ++++++++++++++--
 9 files changed, 124 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d0d5dce/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d5e8bba..3643d0c 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -383,6 +383,8 @@ Release 2.8.0 - UNRELEASED
     YARN-2392. Add more diags about app retry limits on AM failures. (Steve
     Loughran via jianhe)
 
+    YARN-1462. AHS API and other AHS changes to handle tags for completed MR jobs. (xgong)
+
   OPTIMIZATIONS
 
     YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d0d5dce/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
index e5d7254..444a202 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
@@ -83,6 +83,25 @@ public abstract class ApplicationReport {
     return report;
   }
 
+  @Private
+  @Unstable
+  public static ApplicationReport newInstance(ApplicationId applicationId,
+      ApplicationAttemptId applicationAttemptId, String user, String queue,
+      String name, String host, int rpcPort, Token clientToAMToken,
+      YarnApplicationState state, String diagnostics, String url,
+      long startTime, long finishTime, FinalApplicationStatus finalStatus,
+      ApplicationResourceUsageReport appResources, String origTrackingUrl,
+      float progress, String applicationType, Token amRmToken,
+      Set<String> tags) {
+    ApplicationReport report =
+        newInstance(applicationId, applicationAttemptId, user, queue, name,
+          host, rpcPort, clientToAMToken, state, diagnostics, url, startTime,
+          finishTime, finalStatus, appResources, origTrackingUrl, progress,
+          applicationType, amRmToken);
+    report.setApplicationTags(tags);
+    return report;
+  }
+
   /**
    * Get the <code>ApplicationId</code> of the application.
    * @return <code>ApplicationId</code> of the application

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d0d5dce/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
index 9bfd2d6..0c7fdc0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
@@ -19,11 +19,14 @@
 package org.apache.hadoop.yarn.server.applicationhistoryservice;
 
 import java.io.IOException;
+import java.util.Collection;
 import java.util.EnumSet;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -249,6 +252,7 @@ public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
     FinalApplicationStatus finalStatus = FinalApplicationStatus.UNDEFINED;
     YarnApplicationState state = YarnApplicationState.ACCEPTED;
     ApplicationResourceUsageReport appResources = null;
+    Set<String> appTags = null;
     Map<ApplicationAccessType, String> appViewACLs =
         new HashMap<ApplicationAccessType, String>();
     Map<String, Object> entityInfo = entity.getOtherInfo();
@@ -270,7 +274,7 @@ public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
             ConverterUtils.toApplicationId(entity.getEntityId()),
             latestApplicationAttemptId, user, queue, name, null, -1, null, state,
             diagnosticsInfo, null, createdTime, finishedTime, finalStatus, null,
-            null, progress, type, null), appViewACLs);
+            null, progress, type, null, appTags), appViewACLs);
       }
       if (entityInfo.containsKey(ApplicationMetricsConstants.QUEUE_ENTITY_INFO)) {
         queue =
@@ -295,6 +299,17 @@ public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
         appResources=ApplicationResourceUsageReport
             .newInstance(0, 0, null, null, null, memorySeconds, vcoreSeconds);
       }
+      if (entityInfo.containsKey(ApplicationMetricsConstants.APP_TAGS_INFO)) {
+        appTags = new HashSet<String>();
+        Object obj = entityInfo.get(ApplicationMetricsConstants.APP_TAGS_INFO);
+        if (obj != null && obj instanceof Collection<?>) {
+          for(Object o : (Collection<?>)obj) {
+            if (o != null) {
+              appTags.add(o.toString());
+            }
+          }
+        }
+      }
     }
     List<TimelineEvent> events = entity.getEvents();
     if (events != null) {
@@ -347,7 +362,7 @@ public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
         ConverterUtils.toApplicationId(entity.getEntityId()),
         latestApplicationAttemptId, user, queue, name, null, -1, null, state,
         diagnosticsInfo, null, createdTime, finishedTime, finalStatus, appResources,
-        null, progress, type, null), appViewACLs);
+        null, progress, type, null, appTags), appViewACLs);
   }
 
   private static ApplicationAttemptReport convertToApplicationAttemptReport(

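The instanceof guard above is deliberate: otherInfo values round-trip through the timeline store's generic serialization, so the Set<String> that was published is only guaranteed to come back as some Collection. A minimal sketch of that defensive read, in isolation:

    // The stored Set<String> may deserialize as e.g. an ArrayList, so only
    // Collection<?> can be assumed; null elements are skipped defensively.
    Object raw = entityInfo.get(ApplicationMetricsConstants.APP_TAGS_INFO);
    Set<String> tags = new HashSet<String>();
    if (raw instanceof Collection<?>) {
      for (Object o : (Collection<?>) raw) {
        if (o != null) {
          tags.add(o.toString());
        }
      }
    }
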
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d0d5dce/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java
index 8672953..dbd75ac 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java
@@ -22,7 +22,9 @@ import java.security.PrivilegedExceptionAction;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
@@ -185,6 +187,9 @@ public class TestApplicationHistoryManagerOnTimelineStore {
       Assert.assertEquals(Integer.MAX_VALUE + 3L
           + +app.getApplicationId().getId(), app.getFinishTime());
       Assert.assertTrue(Math.abs(app.getProgress() - 1.0F) < 0.0001);
+      Assert.assertEquals(2, app.getApplicationTags().size());
+      Assert.assertTrue(app.getApplicationTags().contains("Test_APP_TAGS_1"));
+      Assert.assertTrue(app.getApplicationTags().contains("Test_APP_TAGS_2"));
       // App 2 doesn't have the ACLs, such that the default ACLs " " will be used.
       // Nobody except admin and owner has access to the details of the app.
       if ((i ==  1 && callerUGI != null &&
@@ -471,6 +476,10 @@ public class TestApplicationHistoryManagerOnTimelineStore {
       entityInfo.put(ApplicationMetricsConstants.APP_VIEW_ACLS_ENTITY_INFO,
           "user2");
     }
+    Set<String> appTags = new HashSet<String>();
+    appTags.add("Test_APP_TAGS_1");
+    appTags.add("Test_APP_TAGS_2");
+    entityInfo.put(ApplicationMetricsConstants.APP_TAGS_INFO, appTags);
     entity.setOtherInfo(entityInfo);
     TimelineEvent tEvent = new TimelineEvent();
     tEvent.setEventType(ApplicationMetricsConstants.CREATED_EVENT_TYPE);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d0d5dce/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
index df8eecb..f452410 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
@@ -73,4 +73,5 @@ public class ApplicationMetricsConstants {
   public static final String LATEST_APP_ATTEMPT_EVENT_INFO =
       "YARN_APPLICATION_LATEST_APP_ATTEMPT";
 
+  public static final String APP_TAGS_INFO = "YARN_APPLICATION_TAGS";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d0d5dce/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/ApplicationCreatedEvent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/ApplicationCreatedEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/ApplicationCreatedEvent.java
index 2373b3b..7c43aa4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/ApplicationCreatedEvent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/ApplicationCreatedEvent.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.metrics;
 
+import java.util.Set;
+
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 
 public class ApplicationCreatedEvent extends
@@ -29,6 +31,7 @@ public class ApplicationCreatedEvent extends
   private String user;
   private String queue;
   private long submittedTime;
+  private Set<String> appTags;
 
   public ApplicationCreatedEvent(ApplicationId appId,
       String name,
@@ -36,7 +39,8 @@ public class ApplicationCreatedEvent extends
       String user,
       String queue,
       long submittedTime,
-      long createdTime) {
+      long createdTime,
+      Set<String> appTags) {
     super(SystemMetricsEventType.APP_CREATED, createdTime);
     this.appId = appId;
     this.name = name;
@@ -44,6 +48,7 @@ public class ApplicationCreatedEvent extends
     this.user = user;
     this.queue = queue;
     this.submittedTime = submittedTime;
+    this.appTags = appTags;
   }
 
   @Override
@@ -75,4 +80,7 @@ public class ApplicationCreatedEvent extends
     return submittedTime;
   }
 
+  public Set<String> getAppTags() {
+    return appTags;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d0d5dce/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java
index 2828aec..63461b5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java
@@ -107,7 +107,7 @@ public class SystemMetricsPublisher extends CompositeService {
               app.getUser(),
               app.getQueue(),
               app.getSubmitTime(),
-              createdTime));
+              createdTime, app.getApplicationTags()));
     }
   }
 
@@ -252,6 +252,8 @@ public class SystemMetricsPublisher extends CompositeService {
         event.getQueue());
     entityInfo.put(ApplicationMetricsConstants.SUBMITTED_TIME_ENTITY_INFO,
         event.getSubmittedTime());
+    entityInfo.put(ApplicationMetricsConstants.APP_TAGS_INFO,
+        event.getAppTags());
     entity.setOtherInfo(entityInfo);
     TimelineEvent tEvent = new TimelineEvent();
     tEvent.setEventType(

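With this change, the APP_CREATED timeline entity's otherInfo map carries the tag set under the YARN_APPLICATION_TAGS key alongside the existing name/user/queue/submit-time fields. Serialized, that portion of the entity would look roughly like this (tag values hypothetical):

    "otherinfo" : {
      ...,
      "YARN_APPLICATION_TAGS" : [ "mrapplication", "ta-example" ]
    }
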
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d0d5dce/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
index 7c4b5e9..b122bc4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
@@ -21,7 +21,11 @@ package org.apache.hadoop.yarn.server.resourcemanager.metrics;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
+import java.util.Collection;
 import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
 
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -143,6 +147,8 @@ public class TestSystemMetricsPublisher {
       Assert.assertEquals(app.getSubmitTime(),
           entity.getOtherInfo().get(
               ApplicationMetricsConstants.SUBMITTED_TIME_ENTITY_INFO));
+      Assert.assertTrue(verifyAppTags(app.getApplicationTags(),
+          entity.getOtherInfo()));
       if (i == 1) {
         Assert.assertEquals("uers1,user2",
             entity.getOtherInfo().get(
@@ -352,6 +358,10 @@ public class TestSystemMetricsPublisher {
         FinalApplicationStatus.UNDEFINED);
     when(app.getRMAppMetrics()).thenReturn(
         new RMAppMetrics(null, 0, 0, Integer.MAX_VALUE, Long.MAX_VALUE));
+    Set<String> appTags = new HashSet<String>();
+    appTags.add("test");
+    appTags.add("tags");
+    when(app.getApplicationTags()).thenReturn(appTags);
     return app;
   }
 
@@ -392,4 +402,31 @@ public class TestSystemMetricsPublisher {
     return container;
   }
 
+  private static boolean verifyAppTags(Set<String> appTags,
+      Map<String, Object> entityInfo) {
+    if (!entityInfo.containsKey(ApplicationMetricsConstants.APP_TAGS_INFO)) {
+      return false;
+    }
+    Object obj = entityInfo.get(ApplicationMetricsConstants.APP_TAGS_INFO);
+    if (obj instanceof Collection<?>) {
+      Collection<?> collection = (Collection<?>) obj;
+      if (collection.size() != appTags.size()) {
+        return false;
+      }
+      for (String appTag : appTags) {
+        boolean match = false;
+        for (Object o : collection) {
+          if (o.toString().equals(appTag)) {
+            match = true;
+            break;
+          }
+        }
+        if (!match) {
+          return false;
+        }
+      }
+      return true;
+    }
+    return false;
+  }
 }

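Because application tags are an unordered set of unique strings, the nested-loop comparison in verifyAppTags could equally be written as a set-equality check; a sketch of that alternative (not the committed code):

    // Copy the published collection into a set and compare order-insensitively.
    Set<String> published = new HashSet<String>();
    for (Object o : collection) {
      published.add(o.toString());
    }
    return published.equals(appTags);
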
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d0d5dce/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
index 90f9512..1b66606 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
@@ -1125,7 +1125,27 @@ Response Body:
           "submittedTime":1430424769395,
           "startedTime":1430424769395,
           "finishedTime":1430424776594,
-          "elapsedTime":7199
+          "elapsedTime":7199},
+          {
+          "appId":"application_1430424020775_0001",
+          "currentAppAttemptId":"appattempt_1430424020775_0001_000001",
+          "user":"zshen",
+          "name":"QuasiMonteCarlo",
+          "queue":"default",
+          "type":"MAPREDUCE",
+          "host":"localhost",
+          "rpcPort":56264,
+          "appState":"FINISHED",
+          "progress":100.0,
+          "diagnosticsInfo":"",
+          "originalTrackingUrl":"http://d-69-91-129-173.dhcp4.washington.edu:19888/jobhistory/job/job_1430424020775_0001",
+          "trackingUrl":"http://d-69-91-129-173.dhcp4.washington.edu:8088/proxy/application_1430424020775_0001/",
+          "finalAppStatus":"SUCCEEDED",
+          "submittedTime":1430424053809,
+          "startedTime":1430424072153,
+          "finishedTime":1430424776594,
+          "elapsedTime":18344,
+          "applicationTags":"mrapplication,ta-example"
           }
       ]
     }
@@ -1227,6 +1247,7 @@ Response Body:
         <startedTime>1430424053809</startedTime>
         <finishedTime>1430424072153</finishedTime>
         <elapsedTime>18344</elapsedTime>
+        <applicationTags>mrapplication,ta-example</applicationTags>
       </app>
     </apps>
 
@@ -1275,7 +1296,8 @@ None
 | `allocatedVCores` | int | The sum of virtual cores allocated to the application's running containers |
 | `currentAppAttemptId` | string | The latest application attempt ID |
 | `host` | string | The host of the ApplicationMaster |
-| `rpcPort` | int | The RPC port of the ApplicationMaster; zero if no IPC service declared. |
+| `rpcPort` | int | The RPC port of the ApplicationMaster; zero if no IPC service declared |
+| `applicationTags` | string | The application tags. |
 
 ### Response Examples:
 
@@ -1311,7 +1333,8 @@ Response Body:
       "submittedTime": 1430424053809,
       "startedTime": 1430424053809,
       "finishedTime": 1430424072153,
-      "elapsedTime": 18344
+      "elapsedTime": 18344,
+      "applicationTags": mrapplication,tag-example
     }
 
 #### XML response
@@ -1349,6 +1372,7 @@ Response Body:
        <startedTime>1430424053809</startedTime>
        <finishedTime>1430424072153</finishedTime>
        <elapsedTime>18344</elapsedTime>
+       <applicationTags>mrapplication,ta-example</applicationTags>
      </app>
 
 ## <a name="REST_API_APPLICATION_ATTEMPT_LIST"></a>Application Attempt List
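
For reference, the JSON application report documented above is what the generic history REST API returns for a single-application GET; with a default timeline server web address (host and application id hypothetical), the request would be along the lines of:

    GET http://localhost:8188/ws/v1/applicationhistory/apps/application_1430424020775_0001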


[44/50] hadoop git commit: HDFS-8539. Hdfs doesn't have class 'debug' in windows. Contributed by Anu Engineer.

Posted by zj...@apache.org.
HDFS-8539. Hdfs doesn't have class 'debug' in windows. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0de32f74
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0de32f74
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0de32f74

Branch: refs/heads/YARN-2928
Commit: 0de32f748ef70ad1f761b351b8190467865872e8
Parents: 1e2c3de
Author: cnauroth <cn...@apache.org>
Authored: Sun Jun 7 13:01:43 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:57:01 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt           | 3 +++
 hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd | 8 +++++++-
 2 files changed, 10 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0de32f74/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 21f587f..853a022 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -861,6 +861,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8463. Calling DFSInputStream.seekToNewSource just after stream creation
     causes NullPointerException (Masatake Iwasaki via kihwal)
 
+    HDFS-8539. Hdfs doesnt have class 'debug' in windows.
+    (Anu Engineer via cnauroth)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0de32f74/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
index 8115349..2181e47 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
@@ -59,7 +59,7 @@ if "%1" == "--loglevel" (
     )
   )
 
-  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath crypto
+  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath crypto debug
   for %%i in ( %hdfscommands% ) do (
     if %hdfs-command% == %%i set hdfscommand=true
   )
@@ -179,6 +179,11 @@ goto :eof
   set CLASS=org.apache.hadoop.hdfs.tools.CryptoAdmin
   goto :eof
 
+:debug
+  set CLASS=org.apache.hadoop.hdfs.tools.DebugAdmin
+  goto :eof
+
+
 @rem This changes %1, %2 etc. Hence those cannot be used after calling this.
 :make_command_arguments
   if "%1" == "--config" (
@@ -237,4 +242,5 @@ goto :eof
   @echo.
   @echo Most commands print help when invoked w/o parameters.
 
+@rem There are also debug commands, but they don't show up in this listing.
 endlocal

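For illustration, once the :debug label is wired up, Windows users reach the same org.apache.hadoop.hdfs.tools.DebugAdmin entry point that Unix installs already expose. Hedging on the exact subcommand set in this release, invocations follow the pattern:

    hdfs debug recoverLease -path <path> [-retries <num-retries>]
    hdfs debug verify -meta <metadata-file> [-block <block-file>]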

[40/50] hadoop git commit: HADOOP-12059. S3Credentials should support use of CredentialProvider. Contributed by Sean Busbey.

Posted by zj...@apache.org.
HADOOP-12059. S3Credentials should support use of CredentialProvider. Contributed by Sean Busbey.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ddd92aa8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ddd92aa8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ddd92aa8

Branch: refs/heads/YARN-2928
Commit: ddd92aa8d515742fe214848a9cdaa1517f7f6349
Parents: 9d0d5dc
Author: Andrew Wang <wa...@apache.org>
Authored: Fri Jun 5 13:11:01 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:57:00 2015 -0700

----------------------------------------------------------------------
 .gitignore                                      |   1 +
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../apache/hadoop/security/ProviderUtils.java   |  30 ++++++
 .../alias/AbstractJavaKeyStoreProvider.java     |  10 +-
 .../alias/LocalJavaKeyStoreProvider.java        |  25 ++++-
 .../alias/TestCredentialProviderFactory.java    |  17 ++-
 .../org/apache/hadoop/fs/s3/S3Credentials.java  |  10 +-
 .../apache/hadoop/fs/s3/TestS3Credentials.java  | 107 ++++++++++++++++++-
 8 files changed, 195 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddd92aa8/.gitignore
----------------------------------------------------------------------
diff --git a/.gitignore b/.gitignore
index 779f507..cde198e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -22,5 +22,6 @@ hadoop-common-project/hadoop-common/src/test/resources/contract-test-options.xml
 hadoop-tools/hadoop-openstack/src/test/resources/contract-test-options.xml
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/tla/yarnregistry.toolbox
 yarnregistry.pdf
+hadoop-tools/hadoop-aws/src/test/resources/auth-keys.xml
 hadoop-tools/hadoop-aws/src/test/resources/contract-test-options.xml
 patchprocess/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddd92aa8/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 5f4bdb8..51579da 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -634,6 +634,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12037. Fix wrong classname in example configuration of hadoop-auth
     documentation. (Masatake Iwasaki via wang)
 
+    HADOOP-12059. S3Credentials should support use of CredentialProvider.
+    (Sean Busbey via wang)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddd92aa8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ProviderUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ProviderUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ProviderUtils.java
index 97d656d..b764506 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ProviderUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ProviderUtils.java
@@ -19,8 +19,11 @@
 package org.apache.hadoop.security;
 
 import java.net.URI;
+import java.net.URISyntaxException;
 
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.alias.JavaKeyStoreProvider;
+import org.apache.hadoop.security.alias.LocalJavaKeyStoreProvider;
 
 public class ProviderUtils {
   /**
@@ -49,4 +52,31 @@ public class ProviderUtils {
     }
     return new Path(result.toString());
   }
+
+  /**
+   * Mangle given local java keystore file URI to allow use as a
+   * LocalJavaKeyStoreProvider.
+   * @param localFile absolute URI with file scheme and no authority component.
+   *                  i.e. return of File.toURI,
+   *                  e.g. file:///home/larry/creds.jceks
+   * @return URI of the form localjceks://file/home/larry/creds.jceks
+   * @throws IllegalArgumentException if localFile isn't a file uri or if it
+   *                                  has an authority component.
+   * @throws URISyntaxException if the wrapping process violates RFC 2396
+   */
+  public static URI nestURIForLocalJavaKeyStoreProvider(final URI localFile)
+      throws URISyntaxException {
+    if (!("file".equals(localFile.getScheme()))) {
+      throw new IllegalArgumentException("passed URI had a scheme other than " +
+          "file.");
+    }
+    if (localFile.getAuthority() != null) {
+      throw new IllegalArgumentException("passed URI must not have an " +
+          "authority component. For non-local keystores, please use " +
+          JavaKeyStoreProvider.class.getName());
+    }
+    return new URI(LocalJavaKeyStoreProvider.SCHEME_NAME,
+        "//file" + localFile.getSchemeSpecificPart(), localFile.getFragment());
+  }
+
 }

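To make the mangling in nestURIForLocalJavaKeyStoreProvider concrete, here is a sketch of wiring a local keystore into the credential provider path (file location hypothetical, mapping taken from the javadoc above):

    // file:///home/larry/creds.jceks -> localjceks://file/home/larry/creds.jceks
    URI local = new File("/home/larry/creds.jceks").toURI();
    URI provider = ProviderUtils.nestURIForLocalJavaKeyStoreProvider(local);
    conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
        provider.toString());
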
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddd92aa8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/AbstractJavaKeyStoreProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/AbstractJavaKeyStoreProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/AbstractJavaKeyStoreProvider.java
index 9251044..76b8cd5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/AbstractJavaKeyStoreProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/AbstractJavaKeyStoreProvider.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.security.alias;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -60,6 +62,8 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
  */
 @InterfaceAudience.Private
 public abstract class AbstractJavaKeyStoreProvider extends CredentialProvider {
+  public static final Log LOG = LogFactory.getLog(
+      AbstractJavaKeyStoreProvider.class);
   public static final String CREDENTIAL_PASSWORD_NAME =
       "HADOOP_CREDSTORE_PASSWORD";
   public static final String KEYSTORE_PASSWORD_FILE_KEY =
@@ -197,6 +201,9 @@ public abstract class AbstractJavaKeyStoreProvider extends CredentialProvider {
   protected void initFileSystem(URI keystoreUri, Configuration conf)
       throws IOException {
     path = ProviderUtils.unnestUri(keystoreUri);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("backing jks path initialized to " + path);
+    }
   }
 
   @Override
@@ -318,9 +325,10 @@ public abstract class AbstractJavaKeyStoreProvider extends CredentialProvider {
     writeLock.lock();
     try {
       if (!changed) {
+        LOG.debug("Keystore hasn't changed, returning.");
         return;
       }
-      // write out the keystore
+      LOG.debug("Writing out keystore.");
       try (OutputStream out = getOutputStreamForKeystore()) {
         keyStore.store(out, password);
       } catch (KeyStoreException e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddd92aa8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/LocalJavaKeyStoreProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/LocalJavaKeyStoreProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/LocalJavaKeyStoreProvider.java
index 3840979..61744c7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/LocalJavaKeyStoreProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/LocalJavaKeyStoreProvider.java
@@ -65,13 +65,17 @@ public final class LocalJavaKeyStoreProvider extends
 
   @Override
   protected OutputStream getOutputStreamForKeystore() throws IOException {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("using '" + file + "' for output stream.");
+    }
     FileOutputStream out = new FileOutputStream(file);
     return out;
   }
 
   @Override
   protected boolean keystoreExists() throws IOException {
-    return file.exists();
+    /* The keystore loader doesn't handle zero length files. */
+    return file.exists() && (file.length() > 0);
   }
 
   @Override
@@ -122,6 +126,22 @@ public final class LocalJavaKeyStoreProvider extends
     super.initFileSystem(uri, conf);
     try {
       file = new File(new URI(getPath().toString()));
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("initialized local file as '" + file + "'.");
+        if (file.exists()) {
+          LOG.debug("the local file exists and is size " + file.length());
+          if (LOG.isTraceEnabled()) {
+            if (file.canRead()) {
+              LOG.trace("we can read the local file.");
+            }
+            if (file.canWrite()) {
+              LOG.trace("we can write the local file.");
+            }
+          }
+        } else {
+          LOG.debug("the local file does not exist.");
+        }
+      }
     } catch (URISyntaxException e) {
       throw new IOException(e);
     }
@@ -130,6 +150,9 @@ public final class LocalJavaKeyStoreProvider extends
   @Override
   public void flush() throws IOException {
     super.flush();
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Reseting permissions to '" + permissions + "'");
+    }
     if (!Shell.WINDOWS) {
       Files.setPosixFilePermissions(Paths.get(file.getCanonicalPath()),
           permissions);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddd92aa8/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredentialProviderFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredentialProviderFactory.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredentialProviderFactory.java
index 16cb0be..73cf3f4 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredentialProviderFactory.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredentialProviderFactory.java
@@ -23,6 +23,8 @@ import java.net.URI;
 import java.util.List;
 import java.util.Random;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -32,14 +34,27 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.ProviderUtils;
 import org.apache.hadoop.security.UserGroupInformation;
+
+import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.TestName;
 
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 public class TestCredentialProviderFactory {
-  
+  public static final Log LOG = LogFactory.getLog(TestCredentialProviderFactory.class);
+
+  @Rule
+  public final TestName test = new TestName();
+
+  @Before
+  public void announce() {
+    LOG.info("Running test " + test.getMethodName());
+  }
+
   private static char[] chars = { 'a', 'b', 'c', 'd', 'e', 'f', 'g',
   'h', 'j', 'k', 'm', 'n', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w',
   'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K',

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddd92aa8/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3Credentials.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3Credentials.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3Credentials.java
index 6b78ad7..fdacc3f 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3Credentials.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3Credentials.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.fs.s3;
 
+import java.io.IOException;
 import java.net.URI;
 
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -39,8 +40,10 @@ public class S3Credentials {
   /**
    * @throws IllegalArgumentException if credentials for S3 cannot be
    * determined.
+   * @throws IOException if credential providers are misconfigured and we have
+   *                     to talk to them.
    */
-  public void initialize(URI uri, Configuration conf) {
+  public void initialize(URI uri, Configuration conf) throws IOException {
     if (uri.getHost() == null) {
       throw new IllegalArgumentException("Invalid hostname in URI " + uri);
     }
@@ -64,7 +67,10 @@ public class S3Credentials {
       accessKey = conf.getTrimmed(accessKeyProperty);
     }
     if (secretAccessKey == null) {
-      secretAccessKey = conf.getTrimmed(secretAccessKeyProperty);
+      final char[] pass = conf.getPassword(secretAccessKeyProperty);
+      if (pass != null) {
+        secretAccessKey = (new String(pass)).trim();
+      }
     }
     if (accessKey == null && secretAccessKey == null) {
       throw new IllegalArgumentException("AWS " +

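The switch from conf.getTrimmed to conf.getPassword is the heart of the change: getPassword consults any configured credential providers first and falls back to the plain configuration value, so existing plaintext setups keep working while providers become possible. A minimal sketch of that lookup in isolation:

    // Returns null if the property is set nowhere; otherwise the char[] comes
    // from a credential provider or, failing that, the XML configuration.
    char[] pass = conf.getPassword("fs.s3.awsSecretAccessKey");
    String secretAccessKey = (pass != null) ? new String(pass).trim() : null;
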
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddd92aa8/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3/TestS3Credentials.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3/TestS3Credentials.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3/TestS3Credentials.java
index bcbf0dc..28e1f4b 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3/TestS3Credentials.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3/TestS3Credentials.java
@@ -17,13 +17,40 @@
  */
 package org.apache.hadoop.fs.s3;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.ProviderUtils;
+import org.apache.hadoop.security.alias.CredentialProvider;
+import org.apache.hadoop.security.alias.CredentialProviderFactory;
+
+import java.io.File;
 import java.net.URI;
 
-import junit.framework.TestCase;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.junit.rules.TestName;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
 
-import org.apache.hadoop.conf.Configuration;
+public class TestS3Credentials {
+  public static final Log LOG = LogFactory.getLog(TestS3Credentials.class);
+
+  @Rule
+  public final TestName test = new TestName();
+
+  @Before
+  public void announce() {
+    LOG.info("Running test " + test.getMethodName());
+  }
+
+  private static final String EXAMPLE_ID = "AKASOMEACCESSKEY";
+  private static final String EXAMPLE_KEY =
+      "RGV0cm9pdCBSZ/WQgY2xl/YW5lZCB1cAEXAMPLE";
 
-public class TestS3Credentials extends TestCase {
+  @Test
   public void testInvalidHostnameWithUnderscores() throws Exception {
     S3Credentials s3Credentials = new S3Credentials();
     try {
@@ -33,4 +60,78 @@ public class TestS3Credentials extends TestCase {
       assertEquals("Invalid hostname in URI s3://a:b@c_d", e.getMessage());
     }
   }
+
+  @Test
+  public void testPlaintextConfigPassword() throws Exception {
+    S3Credentials s3Credentials = new S3Credentials();
+    Configuration conf = new Configuration();
+    conf.set("fs.s3.awsAccessKeyId", EXAMPLE_ID);
+    conf.set("fs.s3.awsSecretAccessKey", EXAMPLE_KEY);
+    s3Credentials.initialize(new URI("s3://foobar"), conf);
+    assertEquals("Could not retrieve proper access key", EXAMPLE_ID,
+        s3Credentials.getAccessKey());
+    assertEquals("Could not retrieve proper secret", EXAMPLE_KEY,
+        s3Credentials.getSecretAccessKey());
+  }
+
+  @Test
+  public void testPlaintextConfigPasswordWithWhitespace() throws Exception {
+    S3Credentials s3Credentials = new S3Credentials();
+    Configuration conf = new Configuration();
+    conf.set("fs.s3.awsAccessKeyId", "\r\n " + EXAMPLE_ID +
+        " \r\n");
+    conf.set("fs.s3.awsSecretAccessKey", "\r\n " + EXAMPLE_KEY +
+        " \r\n");
+    s3Credentials.initialize(new URI("s3://foobar"), conf);
+    assertEquals("Could not retrieve proper access key", EXAMPLE_ID,
+        s3Credentials.getAccessKey());
+    assertEquals("Could not retrieve proper secret", EXAMPLE_KEY,
+        s3Credentials.getSecretAccessKey());
+  }
+
+  @Rule
+  public final TemporaryFolder tempDir = new TemporaryFolder();
+
+  @Test
+  public void testCredentialProvider() throws Exception {
+    // set up conf to have a cred provider
+    final Configuration conf = new Configuration();
+    final File file = tempDir.newFile("test.jks");
+    final URI jks = ProviderUtils.nestURIForLocalJavaKeyStoreProvider(
+        file.toURI());
+    conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
+        jks.toString());
+
+    // add our creds to the provider
+    final CredentialProvider provider =
+        CredentialProviderFactory.getProviders(conf).get(0);
+    provider.createCredentialEntry("fs.s3.awsSecretAccessKey",
+        EXAMPLE_KEY.toCharArray());
+    provider.flush();
+
+    // make sure S3Creds can retrieve things.
+    S3Credentials s3Credentials = new S3Credentials();
+    conf.set("fs.s3.awsAccessKeyId", EXAMPLE_ID);
+    s3Credentials.initialize(new URI("s3://foobar"), conf);
+    assertEquals("Could not retrieve proper access key", EXAMPLE_ID,
+        s3Credentials.getAccessKey());
+    assertEquals("Could not retrieve proper secret", EXAMPLE_KEY,
+        s3Credentials.getSecretAccessKey());
+  }
+
+  @Test(expected=IllegalArgumentException.class)
+  public void noSecretShouldThrow() throws Exception {
+    S3Credentials s3Credentials = new S3Credentials();
+    Configuration conf = new Configuration();
+    conf.set("fs.s3.awsAccessKeyId", EXAMPLE_ID);
+    s3Credentials.initialize(new URI("s3://foobar"), conf);
+  }
+
+  @Test(expected=IllegalArgumentException.class)
+  public void noAccessIdShouldThrow() throws Exception {
+    S3Credentials s3Credentials = new S3Credentials();
+    Configuration conf = new Configuration();
+    conf.set("fs.s3.awsSecretAccessKey", EXAMPLE_KEY);
+    s3Credentials.initialize(new URI("s3://foobar"), conf);
+  }
 }

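Outside of tests, the same keystore can be populated with the credential CLI; as a hedged example (keystore path hypothetical), the secret would be stored with the command below, which prompts interactively for the value:

    hadoop credential create fs.s3.awsSecretAccessKey \
        -provider localjceks://file/home/user/s3.jceks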

[19/50] hadoop git commit: HADOOP-12019. update BUILDING.txt to include python for 'mvn site' in windows (Contributed by Vinayakumar B)

Posted by zj...@apache.org.
HADOOP-12019. update BUILDING.txt to include python for 'mvn site' in windows (Contributed by Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df96753b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df96753b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df96753b

Branch: refs/heads/YARN-2928
Commit: df96753bf14854858476661e1c9b23062983982f
Parents: bb2903c
Author: Vinayakumar B <vi...@apache.org>
Authored: Thu Jun 4 10:42:52 2015 +0530
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:43:16 2015 -0700

----------------------------------------------------------------------
 BUILDING.txt                                    | 1 +
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
 2 files changed, 4 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df96753b/BUILDING.txt
----------------------------------------------------------------------
diff --git a/BUILDING.txt b/BUILDING.txt
index de0e0e8..2aeade4 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -282,6 +282,7 @@ Requirements:
 * Internet connection for first build (to fetch all Maven and Hadoop dependencies)
 * Unix command-line tools from GnuWin32: sh, mkdir, rm, cp, tar, gzip. These
   tools must be present on your PATH.
+* Python (for generation of docs using 'mvn site')
 
 Unix command-line tools are also included with the Windows Git package which
 can be downloaded from http://git-scm.com/download/win.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df96753b/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 53bb150..cf35cfe 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -817,6 +817,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12018. smart-apply-patch.sh fails if the patch edits CR+LF files
     and is created by 'git diff --no-prefix'. (Kengo Seki via aajisaka)
 
+    HADOOP-12019. update BUILDING.txt to include python for 'mvn site'
+    in windows (vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES


[03/50] hadoop git commit: HDFS-8470. fsimage loading progress should update inode, delegation token and cache pool count. (Contributed by surendra singh lilhore)

Posted by zj...@apache.org.
HDFS-8470. fsimage loading progress should update inode, delegation token and cache pool count. (Contributed by surendra singh lilhore)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b8dd3170
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b8dd3170
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b8dd3170

Branch: refs/heads/YARN-2928
Commit: b8dd3170c89333c8d2fc3b231773769d31473703
Parents: a2b4137
Author: Vinayakumar B <vi...@apache.org>
Authored: Wed Jun 3 14:24:55 2015 +0530
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:43:12 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../server/namenode/FSImageFormatPBINode.java   | 15 ++++++++--
 .../server/namenode/FSImageFormatProtobuf.java  | 30 ++++++++++++++------
 3 files changed, 36 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8dd3170/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 402a547..8cbe0e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -840,6 +840,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8256. "-storagepolicies , -blockId ,-replicaDetails " options are missed
     out in usage and from documentation (J.Andreina via vinayakumarb)
 
+    HDFS-8470. fsimage loading progress should update inode, delegation token and
+    cache pool count. (surendra singh lilhore via vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8dd3170/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
index 1c14220..e8378e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
@@ -58,6 +58,10 @@ import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFea
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
+import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
+import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
+import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
 import org.apache.hadoop.hdfs.util.EnumCounters;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
@@ -251,11 +255,15 @@ public final class FSImageFormatPBINode {
       }
     }
 
-    void loadINodeSection(InputStream in) throws IOException {
+    void loadINodeSection(InputStream in, StartupProgress prog,
+        Step currentStep) throws IOException {
       INodeSection s = INodeSection.parseDelimitedFrom(in);
       fsn.dir.resetLastInodeId(s.getLastInodeId());
-      LOG.info("Loading " + s.getNumInodes() + " INodes.");
-      for (int i = 0; i < s.getNumInodes(); ++i) {
+      long numInodes = s.getNumInodes();
+      LOG.info("Loading " + numInodes + " INodes.");
+      prog.setTotal(Phase.LOADING_FSIMAGE, currentStep, numInodes);
+      Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, currentStep);
+      for (int i = 0; i < numInodes; ++i) {
         INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
         if (p.getId() == INodeId.ROOT_INODE_ID) {
           loadRootINode(p);
@@ -263,6 +271,7 @@ public final class FSImageFormatPBINode {
           INode n = loadINode(p);
           dir.addToInodeMap(n);
         }
+        counter.increment();
       }
     }
 

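The idiom introduced here is the standard StartupProgress pattern: publish the expected total once, then increment a Counter per item so the NameNode startup page can report percent complete. Isolated, with the phase and step values from the surrounding code:

    // Register the total, then tick the counter as each inode is loaded.
    prog.setTotal(Phase.LOADING_FSIMAGE, currentStep, numInodes);
    Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, currentStep);
    for (int i = 0; i < numInodes; ++i) {
      // ... parse and load one INode ...
      counter.increment();
    }
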
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8dd3170/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
index 24afcae..69e9bb5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FSImageFormatPBSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
+import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
@@ -250,7 +251,7 @@ public final class FSImageFormatProtobuf {
         case INODE: {
           currentStep = new Step(StepType.INODES);
           prog.beginStep(Phase.LOADING_FSIMAGE, currentStep);
-          inodeLoader.loadINodeSection(in);
+          inodeLoader.loadINodeSection(in, prog, currentStep);
         }
           break;
         case INODE_REFERENCE:
@@ -272,14 +273,14 @@ public final class FSImageFormatProtobuf {
           prog.endStep(Phase.LOADING_FSIMAGE, currentStep);
           Step step = new Step(StepType.DELEGATION_TOKENS);
           prog.beginStep(Phase.LOADING_FSIMAGE, step);
-          loadSecretManagerSection(in);
+          loadSecretManagerSection(in, prog, step);
           prog.endStep(Phase.LOADING_FSIMAGE, step);
         }
           break;
         case CACHE_MANAGER: {
           Step step = new Step(StepType.CACHE_POOLS);
           prog.beginStep(Phase.LOADING_FSIMAGE, step);
-          loadCacheManagerSection(in);
+          loadCacheManagerSection(in, prog, step);
           prog.endStep(Phase.LOADING_FSIMAGE, step);
         }
           break;
@@ -316,7 +317,8 @@ public final class FSImageFormatProtobuf {
       }
     }
 
-    private void loadSecretManagerSection(InputStream in) throws IOException {
+    private void loadSecretManagerSection(InputStream in, StartupProgress prog,
+        Step currentStep) throws IOException {
       SecretManagerSection s = SecretManagerSection.parseDelimitedFrom(in);
       int numKeys = s.getNumKeys(), numTokens = s.getNumTokens();
       ArrayList<SecretManagerSection.DelegationKey> keys = Lists
@@ -327,20 +329,30 @@ public final class FSImageFormatProtobuf {
       for (int i = 0; i < numKeys; ++i)
         keys.add(SecretManagerSection.DelegationKey.parseDelimitedFrom(in));
 
-      for (int i = 0; i < numTokens; ++i)
+      prog.setTotal(Phase.LOADING_FSIMAGE, currentStep, numTokens);
+      Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, currentStep);
+      for (int i = 0; i < numTokens; ++i) {
         tokens.add(SecretManagerSection.PersistToken.parseDelimitedFrom(in));
+        counter.increment();
+      }
 
       fsn.loadSecretManagerState(s, keys, tokens);
     }
 
-    private void loadCacheManagerSection(InputStream in) throws IOException {
+    private void loadCacheManagerSection(InputStream in, StartupProgress prog,
+        Step currentStep) throws IOException {
       CacheManagerSection s = CacheManagerSection.parseDelimitedFrom(in);
-      ArrayList<CachePoolInfoProto> pools = Lists.newArrayListWithCapacity(s
-          .getNumPools());
+      int numPools = s.getNumPools();
+      ArrayList<CachePoolInfoProto> pools = Lists
+          .newArrayListWithCapacity(numPools);
       ArrayList<CacheDirectiveInfoProto> directives = Lists
           .newArrayListWithCapacity(s.getNumDirectives());
-      for (int i = 0; i < s.getNumPools(); ++i)
+      prog.setTotal(Phase.LOADING_FSIMAGE, currentStep, numPools);
+      Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, currentStep);
+      for (int i = 0; i < numPools; ++i) {
         pools.add(CachePoolInfoProto.parseDelimitedFrom(in));
+        counter.increment();
+      }
       for (int i = 0; i < s.getNumDirectives(); ++i)
         directives.add(CacheDirectiveInfoProto.parseDelimitedFrom(in));
       fsn.getCacheManager().loadState(


[05/50] hadoop git commit: HDFS-8270. create() always retried with hardcoded timeout when file already exists with open lease (Contributed by J.Andreina)

Posted by zj...@apache.org.
HDFS-8270. create() always retried with hardcoded timeout when file already exists with open lease (Contributed by J.Andreina)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2b41375
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2b41375
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2b41375

Branch: refs/heads/YARN-2928
Commit: a2b4137519faeace7b20cdc8da8106234b517215
Parents: f029f9b
Author: Vinayakumar B <vi...@apache.org>
Authored: Wed Jun 3 12:11:46 2015 +0530
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:43:12 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  5 +--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  8 ----
 .../org/apache/hadoop/hdfs/NameNodeProxies.java | 43 +-------------------
 .../apache/hadoop/hdfs/TestFileCreation.java    |  6 ---
 5 files changed, 5 insertions(+), 60 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2b41375/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index abf6452..402a547 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -936,6 +936,9 @@ Release 2.7.1 - UNRELEASED
     HDFS-8486. DN startup may cause severe data loss (Daryn Sharp via Colin P.
     McCabe)
 
+    HDFS-8270. create() always retried with hardcoded timeout when file already
+    exists with open lease (J.Andreina via vinayakumarb)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2b41375/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index fc1cd26..f4ceab3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -342,13 +342,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
       this.namenode = rpcNamenode;
       dtService = null;
     } else {
-      boolean noRetries = conf.getBoolean(
-          DFSConfigKeys.DFS_CLIENT_TEST_NO_PROXY_RETRIES,
-          DFSConfigKeys.DFS_CLIENT_TEST_NO_PROXY_RETRIES_DEFAULT);
       Preconditions.checkArgument(nameNodeUri != null,
           "null URI");
       proxyInfo = NameNodeProxies.createProxy(conf, nameNodeUri,
-          ClientProtocol.class, nnFallbackToSimpleAuth, !noRetries);
+          ClientProtocol.class, nnFallbackToSimpleAuth);
       this.dtService = proxyInfo.getDelegationTokenService();
       this.namenode = proxyInfo.getProxy();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2b41375/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 9c19f91..5bb6e53 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs;
 
 import java.util.concurrent.TimeUnit;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
@@ -999,13 +998,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY = "dfs.client.test.drop.namenode.response.number";
   public static final int     DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT = 0;
 
-  // Create a NN proxy without retries for testing.
-  @VisibleForTesting
-  public static final String  DFS_CLIENT_TEST_NO_PROXY_RETRIES =
-      "dfs.client.test.no.proxy.retries";
-  @VisibleForTesting
-  public static final boolean DFS_CLIENT_TEST_NO_PROXY_RETRIES_DEFAULT = false;
-
   public static final String  DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY =
       "dfs.client.slow.io.warning.threshold.ms";
   public static final long    DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT = 30000;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2b41375/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
index bafc76f..d873526 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
-import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
@@ -43,7 +42,6 @@ import org.apache.hadoop.hdfs.protocolPB.JournalProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
@@ -161,31 +159,6 @@ public class NameNodeProxies {
   public static <T> ProxyAndInfo<T> createProxy(Configuration conf,
       URI nameNodeUri, Class<T> xface, AtomicBoolean fallbackToSimpleAuth)
       throws IOException {
-    return createProxy(conf, nameNodeUri, xface, fallbackToSimpleAuth, true);
-  }
-
-  /**
-   * Creates the namenode proxy with the passed protocol. This will handle
-   * creation of either HA- or non-HA-enabled proxy objects, depending upon
-   * if the provided URI is a configured logical URI.
-   *
-   * @param conf the configuration containing the required IPC
-   *        properties, client failover configurations, etc.
-   * @param nameNodeUri the URI pointing either to a specific NameNode
-   *        or to a logical nameservice.
-   * @param xface the IPC interface which should be created
-   * @param fallbackToSimpleAuth set to true or false during calls to
-   *   indicate if a secure client falls back to simple auth
-   * @param withRetries certain interfaces have a non-standard retry policy
-   * @return an object containing both the proxy and the associated
-   *         delegation token service it corresponds to
-   * @throws IOException if there is an error creating the proxy
-   **/
-  @SuppressWarnings("unchecked")
-  public static <T> ProxyAndInfo<T> createProxy(Configuration conf,
-      URI nameNodeUri, Class<T> xface, AtomicBoolean fallbackToSimpleAuth,
-      boolean withRetries)
-      throws IOException {
     AbstractNNFailoverProxyProvider<T> failoverProxyProvider =
         createFailoverProxyProvider(conf, nameNodeUri, xface, true,
           fallbackToSimpleAuth);
@@ -193,7 +166,7 @@ public class NameNodeProxies {
     if (failoverProxyProvider == null) {
       // Non-HA case
       return createNonHAProxy(conf, NameNode.getAddress(nameNodeUri), xface,
-          UserGroupInformation.getCurrentUser(), withRetries,
+          UserGroupInformation.getCurrentUser(), true,
           fallbackToSimpleAuth);
     } else {
       // HA case
@@ -442,22 +415,8 @@ public class NameNodeProxies {
 
     if (withRetries) { // create the proxy with retries
 
-      RetryPolicy createPolicy = RetryPolicies
-          .retryUpToMaximumCountWithFixedSleep(5,
-              HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
-    
-      Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap 
-                 = new HashMap<Class<? extends Exception>, RetryPolicy>();
-      remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
-          createPolicy);
-
-      RetryPolicy methodPolicy = RetryPolicies.retryByRemoteException(
-          defaultPolicy, remoteExceptionToPolicyMap);
       Map<String, RetryPolicy> methodNameToPolicyMap 
                  = new HashMap<String, RetryPolicy>();
-    
-      methodNameToPolicyMap.put("create", methodPolicy);
-
       ClientProtocol translatorProxy =
         new ClientNamenodeProtocolTranslatorPB(proxy);
       return (ClientProtocol) RetryProxy.create(
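
For reference, the retry escalation removed in the hunk above was built with the standard org.apache.hadoop.io.retry machinery: a base policy, a per-exception override, and a per-method binding applied through RetryProxy. Below is a minimal, self-contained sketch of that same pattern; the MyProtocol interface, the one-second sleep, and the IOException mapping are illustrative assumptions, not HDFS code.

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;
    import org.apache.hadoop.io.retry.RetryProxy;

    public class RetryWiringSketch {
      // Hypothetical protocol standing in for ClientProtocol.
      interface MyProtocol {
        void create(String src) throws IOException;
      }

      static MyProtocol wrapWithRetries(MyProtocol rawProxy) {
        // Retry up to 5 times with a fixed sleep between attempts.
        RetryPolicy createPolicy = RetryPolicies
            .retryUpToMaximumCountWithFixedSleep(5, 1, TimeUnit.SECONDS);

        // Escalate only for selected remote exception classes.
        Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicy =
            new HashMap<Class<? extends Exception>, RetryPolicy>();
        exceptionToPolicy.put(IOException.class, createPolicy);
        RetryPolicy methodPolicy = RetryPolicies.retryByRemoteException(
            RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicy);

        // Bind the policy to individual method names, as the removed
        // code did for "create".
        Map<String, RetryPolicy> methodNameToPolicy =
            new HashMap<String, RetryPolicy>();
        methodNameToPolicy.put("create", methodPolicy);

        return (MyProtocol) RetryProxy.create(MyProtocol.class, rawProxy,
            methodNameToPolicy);
      }
    }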

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2b41375/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
index 4977015..525d84e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
@@ -65,7 +65,6 @@ import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -74,7 +73,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
@@ -380,10 +378,6 @@ public class TestFileCreation {
     SimulatedFSDataset.setFactory(conf);
     conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
 
-    // Force NameNodeProxies' createNNProxyWithClientProtocol to give
-    // up file creation after one failure.
-    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_TEST_NO_PROXY_RETRIES, true);
-
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = cluster.getFileSystem();
 


[48/50] hadoop git commit: YARN-3747. TestLocalDirsHandlerService should delete the created test directory logDir2. Contributed by David Moore.

Posted by zj...@apache.org.
YARN-3747. TestLocalDirsHandlerService should delete the created test
directory logDir2. Contributed by David Moore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ddf75e34
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ddf75e34
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ddf75e34

Branch: refs/heads/YARN-2928
Commit: ddf75e342ba7d502f79f72aebc6fb721b614f81d
Parents: 3ecbac8
Author: Devaraj K <de...@apache.org>
Authored: Mon Jun 8 15:32:13 2015 +0530
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:57:02 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                                   | 3 +++
 .../yarn/server/nodemanager/TestLocalDirsHandlerService.java      | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddf75e34/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index da4f3b2..ab0dcb9 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -595,6 +595,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3780. Should use equals when compare Resource in RMNodeImpl#ReconnectNodeTransition.
     (zhihai xu via devaraj)
 
+    YARN-3747. TestLocalDirsHandlerService should delete the created test directory logDir2.
+    (David Moore via devaraj)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddf75e34/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
index a045e62..c61d1f0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
@@ -144,7 +144,7 @@ public class TestLocalDirsHandlerService {
     FileUtils.deleteDirectory(new File(localDir1));
     FileUtils.deleteDirectory(new File(localDir2));
     FileUtils.deleteDirectory(new File(logDir1));
-    FileUtils.deleteDirectory(new File(logDir1));
+    FileUtils.deleteDirectory(new File(logDir2));
     dirSvc.close();
   }
 }
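
The one-character fix above is a classic copy-paste cleanup bug: logDir1 was deleted twice and logDir2 never. One way to make this kind of leak structurally impossible is JUnit's TemporaryFolder rule, which recursively deletes everything it created after each test; the sketch below is a hedged illustration of that alternative, not code from the patch.

    import java.io.File;

    import org.junit.Rule;
    import org.junit.Test;
    import org.junit.rules.TemporaryFolder;

    public class TemporaryFolderSketch {
      // Creates a fresh root before each test and deletes it afterwards,
      // even when the test fails, so no manual teardown list can drift.
      @Rule
      public TemporaryFolder tmp = new TemporaryFolder();

      @Test
      public void directoriesAreCleanedUpAutomatically() throws Exception {
        File localDir = tmp.newFolder("localDir");
        File logDir = tmp.newFolder("logDir");
        // ... exercise the service against localDir and logDir ...
      }
    }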


[38/50] hadoop git commit: HADOOP-12056. Use DirectoryStream in DiskChecker#checkDirs to detect errors when listing a directory. Contributed by Zhihai Xu.

Posted by zj...@apache.org.
HADOOP-12056. Use DirectoryStream in DiskChecker#checkDirs to detect errors when listing a directory. Contributed by Zhihai Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/01cd698b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/01cd698b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/01cd698b

Branch: refs/heads/YARN-2928
Commit: 01cd698bd5f21d01a654f7c963da6bf46e2b0005
Parents: ddd92aa
Author: Andrew Wang <wa...@apache.org>
Authored: Fri Jun 5 13:52:21 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:57:00 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../org/apache/hadoop/util/DiskChecker.java     | 24 ++++++++++++++++----
 .../org/apache/hadoop/util/TestDiskChecker.java | 22 ++++++++++++++++++
 3 files changed, 45 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/01cd698b/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 51579da..4b1d0d1 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -637,6 +637,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12059. S3Credentials should support use of CredentialProvider.
     (Sean Busbey via wang)
 
+    HADOOP-12056. Use DirectoryStream in DiskChecker#checkDirs to detect
+    errors when listing a directory. (Zhihai Xu via wang)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01cd698b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
index 6b27ae5..a36a7a0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
@@ -20,6 +20,9 @@ package org.apache.hadoop.util;
 
 import java.io.File;
 import java.io.IOException;
+import java.nio.file.DirectoryStream;
+import java.nio.file.DirectoryIteratorException;
+import java.nio.file.Files;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -86,13 +89,26 @@ public class DiskChecker {
    */
   public static void checkDirs(File dir) throws DiskErrorException {
     checkDir(dir);
-    for (File child : dir.listFiles()) {
-      if (child.isDirectory()) {
-        checkDirs(child);
+    IOException ex = null;
+    try (DirectoryStream<java.nio.file.Path> stream =
+        Files.newDirectoryStream(dir.toPath())) {
+      for (java.nio.file.Path entry: stream) {
+        File child = entry.toFile();
+        if (child.isDirectory()) {
+          checkDirs(child);
+        }
       }
+    } catch (DirectoryIteratorException de) {
+      ex = de.getCause();
+    } catch (IOException ie) {
+      ex = ie;
+    }
+    if (ex != null) {
+      throw new DiskErrorException("I/O error when opening a directory: "
+          + dir.toString(), ex);
     }
   }
-  
+
   /**
    * Create the directory if it doesn't exist and check that dir is readable,
    * writable and executable
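
The motivation for the rewrite above: File.listFiles() returns null when a directory cannot be read, so the old loop died with a NullPointerException instead of reporting the disk fault. Files.newDirectoryStream surfaces the failure as an IOException up front, and mid-listing failures as DirectoryIteratorException. A standalone sketch of the same pattern, independent of DiskChecker:

    import java.io.File;
    import java.io.IOException;
    import java.nio.file.DirectoryIteratorException;
    import java.nio.file.DirectoryStream;
    import java.nio.file.Files;
    import java.nio.file.Path;

    public class ListDirSketch {
      /** Recurses into subdirectories, failing loudly instead of NPEing. */
      static void visitSubdirs(File dir) throws IOException {
        try (DirectoryStream<Path> stream =
            Files.newDirectoryStream(dir.toPath())) {
          for (Path entry : stream) {
            File child = entry.toFile();
            if (child.isDirectory()) {
              visitSubdirs(child);
            }
          }
        } catch (DirectoryIteratorException e) {
          // Thrown by the iterator if a read fails mid-listing; its
          // cause is always the underlying IOException.
          throw e.getCause();
        }
      }
    }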

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01cd698b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
index 5ab1313..de54735 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.Shell;
 
@@ -180,4 +181,25 @@ public class TestDiskChecker {
     System.out.println("checkDir success: " + success);
 
   }
+
+  @Test (timeout = 30000)
+  public void testCheckDirsIOException() throws Throwable {
+    Path path = new Path("target", TestDiskChecker.class.getSimpleName());
+    File localDir = new File(path.toUri().getRawPath());
+    localDir.mkdir();
+    File localFile = new File(localDir, "test");
+    localFile.createNewFile();
+    File spyLocalDir = spy(localDir);
+    doReturn(localFile.toPath()).when(spyLocalDir).toPath();
+    try {
+      DiskChecker.checkDirs(spyLocalDir);
+      fail("Expected exception for I/O error");
+    } catch (DiskErrorException e) {
+      GenericTestUtils.assertExceptionContains("I/O error", e);
+      assertTrue(e.getCause() instanceof IOException);
+    } finally {
+      localFile.delete();
+      localDir.delete();
+    }
+  }
 }
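
The new test forces the error path with a Mockito spy: toPath() on the spied directory is redirected to a regular file, so Files.newDirectoryStream fails with an IOException. Note the doReturn(...).when(spy) form; with a spy, the when(spy.toPath()).thenReturn(...) style would invoke the real method during stubbing. A minimal sketch of the idiom (paths illustrative):

    import static org.mockito.Mockito.doReturn;
    import static org.mockito.Mockito.spy;

    import java.io.File;

    public class SpyStubbingSketch {
      public static void main(String[] args) {
        File spied = spy(new File("/tmp"));
        // Safe stubbing for spies: the real toPath() is never invoked.
        doReturn(new File("/tmp/some-file").toPath()).when(spied).toPath();
        System.out.println(spied.toPath()); // prints /tmp/some-file
      }
    }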


[06/50] hadoop git commit: HADOOP-11991. test-patch.sh isn't re-executed even if smart-apply-patch.sh is modified. Contributed by Kengo Seki.

Posted by zj...@apache.org.
HADOOP-11991. test-patch.sh isn't re-executed even if smart-apply-patch.sh is modified. Contributed by Kengo Seki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f029f9b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f029f9b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f029f9b9

Branch: refs/heads/YARN-2928
Commit: f029f9b95a65372ed52589e5b9c9eda53b311b6b
Parents: fb3037e
Author: Akira Ajisaka <aa...@apache.org>
Authored: Wed Jun 3 15:01:02 2015 +0900
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:43:12 2015 -0700

----------------------------------------------------------------------
 dev-support/test-patch.sh                       | 7 ++++---
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
 2 files changed, 7 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f029f9b9/dev-support/test-patch.sh
----------------------------------------------------------------------
diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index 1409467..cd91a5c 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -1473,7 +1473,8 @@ function apply_patch_file
 }
 
 
-## @description  If this patches actually patches test-patch.sh, then
+## @description  If this actually patches the files used for the QA process
+## @description  under dev-support and its subdirectories, then
 ## @description  run with the patched version for the test.
 ## @audience     private
 ## @stability    evolving
@@ -1489,7 +1490,7 @@ function check_reexec
   fi
 
   if [[ ! ${CHANGED_FILES} =~ dev-support/test-patch
-      || ${CHANGED_FILES} =~ dev-support/smart-apply ]] ; then
+     && ! ${CHANGED_FILES} =~ dev-support/smart-apply ]] ; then
     return
   fi
 
@@ -1510,7 +1511,7 @@ function check_reexec
 
     rm "${commentfile}" 2>/dev/null
 
-    echo "(!) A patch to test-patch or smart-apply-patch has been detected. " > "${commentfile}"
+    echo "(!) A patch to the files used for the QA process has been detected. " > "${commentfile}"
     echo "Re-executing against the patched versions to perform further tests. " >> "${commentfile}"
     echo "The console is at ${BUILD_URL}console in case of problems." >> "${commentfile}"
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f029f9b9/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index a0e6e90..5c1fe41 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -811,6 +811,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12042. Users may see TrashPolicy if hdfs dfs -rm is run
     (Andreina J via vinayakumarb)
 
+    HADOOP-11991. test-patch.sh isn't re-executed even if smart-apply-patch.sh
+    is modified. (Kengo Seki via aajisaka)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES


[02/50] hadoop git commit: HDFS-8386. Improve synchronization of 'streamer' reference in DFSOutputStream. Contributed by Rakesh R.

Posted by zj...@apache.org.
HDFS-8386. Improve synchronization of 'streamer' reference in DFSOutputStream. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bd224caf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bd224caf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bd224caf

Branch: refs/heads/YARN-2928
Commit: bd224cafff167f713704d45244b713cb00202af3
Parents: 0a43670
Author: Andrew Wang <wa...@apache.org>
Authored: Tue Jun 2 15:39:24 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:43:11 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../org/apache/hadoop/hdfs/DFSOutputStream.java | 159 +++++++++++--------
 2 files changed, 92 insertions(+), 70 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd224caf/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0822f90..9d427ff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -591,6 +591,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8489. Subclass BlockInfo to represent contiguous blocks.
     (Zhe Zhang via jing9)
 
+    HDFS-8386. Improve synchronization of 'streamer' reference in
+    DFSOutputStream. (Rakesh R via wang)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd224caf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index ae5d3eb..1dc4a9f 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -139,7 +139,7 @@ public class DFSOutputStream extends FSOutputSummer
   @Override
   protected void checkClosed() throws IOException {
     if (isClosed()) {
-      streamer.getLastException().throwException4Close();
+      getStreamer().getLastException().throwException4Close();
     }
   }
 
@@ -148,10 +148,10 @@ public class DFSOutputStream extends FSOutputSummer
   //
   @VisibleForTesting
   public synchronized DatanodeInfo[] getPipeline() {
-    if (streamer.streamerClosed()) {
+    if (getStreamer().streamerClosed()) {
       return null;
     }
-    DatanodeInfo[] currentNodes = streamer.getNodes();
+    DatanodeInfo[] currentNodes = getStreamer().getNodes();
     if (currentNodes == null) {
       return null;
     }
@@ -293,9 +293,9 @@ public class DFSOutputStream extends FSOutputSummer
       // indicate that we are appending to an existing block
       streamer = new DataStreamer(lastBlock, stat, dfsClient, src, progress, checksum,
           cachingStrategy, byteArrayManager);
-      streamer.setBytesCurBlock(lastBlock.getBlockSize());
+      getStreamer().setBytesCurBlock(lastBlock.getBlockSize());
       adjustPacketChunkSize(stat);
-      streamer.setPipelineInConstruction(lastBlock);
+      getStreamer().setPipelineInConstruction(lastBlock);
     } else {
       computePacketChunkSize(dfsClient.getConf().getWritePacketSize(),
           bytesPerChecksum);
@@ -329,7 +329,7 @@ public class DFSOutputStream extends FSOutputSummer
       //
       computePacketChunkSize(0, freeInCksum);
       setChecksumBufSize(freeInCksum);
-      streamer.setAppendChunk(true);
+      getStreamer().setAppendChunk(true);
     } else {
       // if the remaining space in the block is smaller than
       // that expected size of of a packet, then create
@@ -392,36 +392,36 @@ public class DFSOutputStream extends FSOutputSummer
     }
 
     if (currentPacket == null) {
-      currentPacket = createPacket(packetSize, chunksPerPacket, 
-          streamer.getBytesCurBlock(), streamer.getAndIncCurrentSeqno(), false);
+      currentPacket = createPacket(packetSize, chunksPerPacket, getStreamer()
+          .getBytesCurBlock(), getStreamer().getAndIncCurrentSeqno(), false);
       if (DFSClient.LOG.isDebugEnabled()) {
         DFSClient.LOG.debug("DFSClient writeChunk allocating new packet seqno=" + 
             currentPacket.getSeqno() +
             ", src=" + src +
             ", packetSize=" + packetSize +
             ", chunksPerPacket=" + chunksPerPacket +
-            ", bytesCurBlock=" + streamer.getBytesCurBlock());
+            ", bytesCurBlock=" + getStreamer().getBytesCurBlock());
       }
     }
 
     currentPacket.writeChecksum(checksum, ckoff, cklen);
     currentPacket.writeData(b, offset, len);
     currentPacket.incNumChunks();
-    streamer.incBytesCurBlock(len);
+    getStreamer().incBytesCurBlock(len);
 
     // If packet is full, enqueue it for transmission
     //
     if (currentPacket.getNumChunks() == currentPacket.getMaxChunks() ||
-        streamer.getBytesCurBlock() == blockSize) {
+        getStreamer().getBytesCurBlock() == blockSize) {
       if (DFSClient.LOG.isDebugEnabled()) {
         DFSClient.LOG.debug("DFSClient writeChunk packet full seqno=" +
             currentPacket.getSeqno() +
             ", src=" + src +
-            ", bytesCurBlock=" + streamer.getBytesCurBlock() +
+            ", bytesCurBlock=" + getStreamer().getBytesCurBlock() +
             ", blockSize=" + blockSize +
-            ", appendChunk=" + streamer.getAppendChunk());
+            ", appendChunk=" + getStreamer().getAppendChunk());
       }
-      streamer.waitAndQueuePacket(currentPacket);
+      getStreamer().waitAndQueuePacket(currentPacket);
       currentPacket = null;
 
       adjustChunkBoundary();
@@ -436,14 +436,14 @@ public class DFSOutputStream extends FSOutputSummer
    * crc chunks from now on.
    */
   protected void adjustChunkBoundary() {
-    if (streamer.getAppendChunk() &&
-        streamer.getBytesCurBlock() % bytesPerChecksum == 0) {
-      streamer.setAppendChunk(false);
+    if (getStreamer().getAppendChunk() &&
+        getStreamer().getBytesCurBlock() % bytesPerChecksum == 0) {
+      getStreamer().setAppendChunk(false);
       resetChecksumBufSize();
     }
 
-    if (!streamer.getAppendChunk()) {
-      int psize = Math.min((int)(blockSize- streamer.getBytesCurBlock()),
+    if (!getStreamer().getAppendChunk()) {
+      int psize = Math.min((int)(blockSize- getStreamer().getBytesCurBlock()),
           dfsClient.getConf().getWritePacketSize());
       computePacketChunkSize(psize, bytesPerChecksum);
     }
@@ -456,13 +456,13 @@ public class DFSOutputStream extends FSOutputSummer
    * @throws IOException
    */
   protected void endBlock() throws IOException {
-    if (streamer.getBytesCurBlock() == blockSize) {
-      currentPacket = createPacket(0, 0, streamer.getBytesCurBlock(),
-          streamer.getAndIncCurrentSeqno(), true);
+    if (getStreamer().getBytesCurBlock() == blockSize) {
+      currentPacket = createPacket(0, 0, getStreamer().getBytesCurBlock(),
+          getStreamer().getAndIncCurrentSeqno(), true);
       currentPacket.setSyncBlock(shouldSyncBlock);
-      streamer.waitAndQueuePacket(currentPacket);
+      getStreamer().waitAndQueuePacket(currentPacket);
       currentPacket = null;
-      streamer.setBytesCurBlock(0);
+      getStreamer().setBytesCurBlock(0);
       lastFlushOffset = 0;
     }
   }
@@ -551,30 +551,33 @@ public class DFSOutputStream extends FSOutputSummer
 
         if (DFSClient.LOG.isDebugEnabled()) {
           DFSClient.LOG.debug("DFSClient flush(): "
-              + " bytesCurBlock=" + streamer.getBytesCurBlock()
+              + " bytesCurBlock=" + getStreamer().getBytesCurBlock()
               + " lastFlushOffset=" + lastFlushOffset
               + " createNewBlock=" + endBlock);
         }
         // Flush only if we haven't already flushed till this offset.
-        if (lastFlushOffset != streamer.getBytesCurBlock()) {
-          assert streamer.getBytesCurBlock() > lastFlushOffset;
+        if (lastFlushOffset != getStreamer().getBytesCurBlock()) {
+          assert getStreamer().getBytesCurBlock() > lastFlushOffset;
           // record the valid offset of this flush
-          lastFlushOffset = streamer.getBytesCurBlock();
+          lastFlushOffset = getStreamer().getBytesCurBlock();
           if (isSync && currentPacket == null && !endBlock) {
             // Nothing to send right now,
             // but sync was requested.
             // Send an empty packet if we do not end the block right now
             currentPacket = createPacket(packetSize, chunksPerPacket,
-                streamer.getBytesCurBlock(), streamer.getAndIncCurrentSeqno(), false);
+                getStreamer().getBytesCurBlock(), getStreamer()
+                    .getAndIncCurrentSeqno(), false);
           }
         } else {
-          if (isSync && streamer.getBytesCurBlock() > 0 && !endBlock) {
+          if (isSync && getStreamer().getBytesCurBlock() > 0 && !endBlock) {
             // Nothing to send right now,
             // and the block was partially written,
             // and sync was requested.
-            // So send an empty sync packet if we do not end the block right now
+            // So send an empty sync packet if we do not end the block right
+            // now
             currentPacket = createPacket(packetSize, chunksPerPacket,
-                streamer.getBytesCurBlock(), streamer.getAndIncCurrentSeqno(), false);
+                getStreamer().getBytesCurBlock(), getStreamer()
+                    .getAndIncCurrentSeqno(), false);
           } else if (currentPacket != null) {
             // just discard the current packet since it is already been sent.
             currentPacket.releaseBuffer(byteArrayManager);
@@ -583,42 +586,44 @@ public class DFSOutputStream extends FSOutputSummer
         }
         if (currentPacket != null) {
           currentPacket.setSyncBlock(isSync);
-          streamer.waitAndQueuePacket(currentPacket);
+          getStreamer().waitAndQueuePacket(currentPacket);
           currentPacket = null;
         }
-        if (endBlock && streamer.getBytesCurBlock() > 0) {
+        if (endBlock && getStreamer().getBytesCurBlock() > 0) {
           // Need to end the current block, thus send an empty packet to
           // indicate this is the end of the block and reset bytesCurBlock
-          currentPacket = createPacket(0, 0, streamer.getBytesCurBlock(),
-              streamer.getAndIncCurrentSeqno(), true);
+          currentPacket = createPacket(0, 0, getStreamer().getBytesCurBlock(),
+              getStreamer().getAndIncCurrentSeqno(), true);
           currentPacket.setSyncBlock(shouldSyncBlock || isSync);
-          streamer.waitAndQueuePacket(currentPacket);
+          getStreamer().waitAndQueuePacket(currentPacket);
           currentPacket = null;
-          streamer.setBytesCurBlock(0);
+          getStreamer().setBytesCurBlock(0);
           lastFlushOffset = 0;
         } else {
           // Restore state of stream. Record the last flush offset
           // of the last full chunk that was flushed.
-          streamer.setBytesCurBlock(streamer.getBytesCurBlock() - numKept);
+          getStreamer().setBytesCurBlock(
+              getStreamer().getBytesCurBlock() - numKept);
         }
 
-        toWaitFor = streamer.getLastQueuedSeqno();
+        toWaitFor = getStreamer().getLastQueuedSeqno();
       } // end synchronized
 
-      streamer.waitForAckedSeqno(toWaitFor);
+      getStreamer().waitForAckedSeqno(toWaitFor);
 
       // update the block length first time irrespective of flag
-      if (updateLength || streamer.getPersistBlocks().get()) {
+      if (updateLength || getStreamer().getPersistBlocks().get()) {
         synchronized (this) {
-          if (!streamer.streamerClosed() && streamer.getBlock() != null) {
-            lastBlockLength = streamer.getBlock().getNumBytes();
+          if (!getStreamer().streamerClosed()
+              && getStreamer().getBlock() != null) {
+            lastBlockLength = getStreamer().getBlock().getNumBytes();
           }
         }
       }
       // If 1) any new blocks were allocated since the last flush, or 2) to
       // update length in NN is required, then persist block locations on
       // namenode.
-      if (streamer.getPersistBlocks().getAndSet(false) || updateLength) {
+      if (getStreamer().getPersistBlocks().getAndSet(false) || updateLength) {
         try {
           dfsClient.namenode.fsync(src, fileId, dfsClient.clientName,
               lastBlockLength);
@@ -635,8 +640,8 @@ public class DFSOutputStream extends FSOutputSummer
       }
 
       synchronized(this) {
-        if (!streamer.streamerClosed()) {
-          streamer.setHflush();
+        if (!getStreamer().streamerClosed()) {
+          getStreamer().setHflush();
         }
       }
     } catch (InterruptedIOException interrupt) {
@@ -648,7 +653,7 @@ public class DFSOutputStream extends FSOutputSummer
       DFSClient.LOG.warn("Error while syncing", e);
       synchronized (this) {
         if (!isClosed()) {
-          streamer.getLastException().set(e);
+          getStreamer().getLastException().set(e);
           closeThreads(true);
         }
       }
@@ -673,10 +678,10 @@ public class DFSOutputStream extends FSOutputSummer
   public synchronized int getCurrentBlockReplication() throws IOException {
     dfsClient.checkOpen();
     checkClosed();
-    if (streamer.streamerClosed()) {
+    if (getStreamer().streamerClosed()) {
       return blockReplication; // no pipeline, return repl factor of file
     }
-    DatanodeInfo[] currentNodes = streamer.getNodes();
+    DatanodeInfo[] currentNodes = getStreamer().getNodes();
     if (currentNodes == null) {
       return blockReplication; // no pipeline, return repl factor of file
     }
@@ -695,16 +700,16 @@ public class DFSOutputStream extends FSOutputSummer
       //
       // If there is data in the current buffer, send it across
       //
-      streamer.queuePacket(currentPacket);
+      getStreamer().queuePacket(currentPacket);
       currentPacket = null;
-      toWaitFor = streamer.getLastQueuedSeqno();
+      toWaitFor = getStreamer().getLastQueuedSeqno();
     }
 
-    streamer.waitForAckedSeqno(toWaitFor);
+    getStreamer().waitForAckedSeqno(toWaitFor);
   }
 
   protected synchronized void start() {
-    streamer.start();
+    getStreamer().start();
   }
   
   /**
@@ -715,32 +720,32 @@ public class DFSOutputStream extends FSOutputSummer
     if (isClosed()) {
       return;
     }
-    streamer.getLastException().set(new IOException("Lease timeout of "
+    getStreamer().getLastException().set(new IOException("Lease timeout of "
         + (dfsClient.getConf().getHdfsTimeout()/1000) + " seconds expired."));
     closeThreads(true);
     dfsClient.endFileLease(fileId);
   }
 
   boolean isClosed() {
-    return closed || streamer.streamerClosed();
+    return closed || getStreamer().streamerClosed();
   }
 
   void setClosed() {
     closed = true;
-    streamer.release();
+    getStreamer().release();
   }
 
   // shutdown datastreamer and responseprocessor threads.
   // interrupt datastreamer if force is true
   protected void closeThreads(boolean force) throws IOException {
     try {
-      streamer.close(force);
-      streamer.join();
-      streamer.closeSocket();
+      getStreamer().close(force);
+      getStreamer().join();
+      getStreamer().closeSocket();
     } catch (InterruptedException e) {
       throw new IOException("Failed to shutdown streamer");
     } finally {
-      streamer.setSocketToNull();
+      getStreamer().setSocketToNull();
       setClosed();
     }
   }
@@ -762,7 +767,7 @@ public class DFSOutputStream extends FSOutputSummer
 
   protected synchronized void closeImpl() throws IOException {
     if (isClosed()) {
-      streamer.getLastException().check(true);
+      getStreamer().getLastException().check(true);
       return;
     }
 
@@ -770,20 +775,20 @@ public class DFSOutputStream extends FSOutputSummer
       flushBuffer();       // flush from all upper layers
 
       if (currentPacket != null) {
-        streamer.waitAndQueuePacket(currentPacket);
+        getStreamer().waitAndQueuePacket(currentPacket);
         currentPacket = null;
       }
 
-      if (streamer.getBytesCurBlock() != 0) {
+      if (getStreamer().getBytesCurBlock() != 0) {
         // send an empty packet to mark the end of the block
-        currentPacket = createPacket(0, 0, streamer.getBytesCurBlock(),
-            streamer.getAndIncCurrentSeqno(), true);
+        currentPacket = createPacket(0, 0, getStreamer().getBytesCurBlock(),
+            getStreamer().getAndIncCurrentSeqno(), true);
         currentPacket.setSyncBlock(shouldSyncBlock);
       }
 
       flushInternal();             // flush all data to Datanodes
       // get last block before destroying the streamer
-      ExtendedBlock lastBlock = streamer.getBlock();
+      ExtendedBlock lastBlock = getStreamer().getBlock();
       closeThreads(false);
       TraceScope scope = Trace.startSpan("completeFile", Sampler.NEVER);
       try {
@@ -841,7 +846,7 @@ public class DFSOutputStream extends FSOutputSummer
 
   @VisibleForTesting
   public void setArtificialSlowdown(long period) {
-    streamer.setArtificialSlowdown(period);
+    getStreamer().setArtificialSlowdown(period);
   }
 
   @VisibleForTesting
@@ -868,7 +873,7 @@ public class DFSOutputStream extends FSOutputSummer
    * Returns the access token currently used by streamer, for testing only
    */
   synchronized Token<BlockTokenIdentifier> getBlockToken() {
-    return streamer.getBlockToken();
+    return getStreamer().getBlockToken();
   }
 
   @Override
@@ -885,11 +890,25 @@ public class DFSOutputStream extends FSOutputSummer
 
   @VisibleForTesting
   ExtendedBlock getBlock() {
-    return streamer.getBlock();
+    return getStreamer().getBlock();
   }
 
   @VisibleForTesting
   public long getFileId() {
     return fileId;
   }
+
+  /**
+   * Set the data streamer object.
+   */
+  protected synchronized void setStreamer(DataStreamer streamer) {
+    this.streamer = streamer;
+  }
+
+  /**
+   * Returns the data streamer object.
+   */
+  protected synchronized DataStreamer getStreamer() {
+    return streamer;
+  }
 }
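
The pattern applied throughout the diff above replaces direct reads of the mutable streamer field with a synchronized getter/setter pair, so every thread observes a consistent reference once subclasses are allowed to swap it. Reduced to its essentials (the names below are illustrative, not the HDFS classes):

    public class StreamHolder {
      // Guarded by "this": never read or written outside the
      // synchronized accessors below.
      private Runnable worker;

      protected synchronized void setWorker(Runnable w) {
        this.worker = w;
      }

      protected synchronized Runnable getWorker() {
        return worker;
      }

      void doWork() {
        // Even internal code goes through the accessor, so a concurrent
        // setWorker() can never expose a stale or half-published field.
        getWorker().run();
      }
    }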


[33/50] hadoop git commit: Add missing test file of YARN-3733

Posted by zj...@apache.org.
Add missing test file of YARN-3733


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eba031e1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eba031e1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eba031e1

Branch: refs/heads/YARN-2928
Commit: eba031e1d13508bb62723625828fae267b043de1
Parents: 96a8d01
Author: Wangda Tan <wa...@apache.org>
Authored: Thu Jun 4 13:18:25 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:56:58 2015 -0700

----------------------------------------------------------------------
 .../util/resource/TestResourceCalculator.java   | 125 +++++++++++++++++++
 1 file changed, 125 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eba031e1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java
new file mode 100644
index 0000000..6a0b62e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.util.resource;
+
+import java.util.Arrays;
+import java.util.Collection;
+
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+@RunWith(Parameterized.class)
+public class TestResourceCalculator {
+  private ResourceCalculator resourceCalculator;
+
+  @Parameterized.Parameters
+  public static Collection<ResourceCalculator[]> getParameters() {
+    return Arrays.asList(new ResourceCalculator[][] {
+        { new DefaultResourceCalculator() },
+        { new DominantResourceCalculator() } });
+  }
+
+  public TestResourceCalculator(ResourceCalculator rs) {
+    this.resourceCalculator = rs;
+  }
+
+  @Test(timeout = 10000)
+  public void testResourceCalculatorCompareMethod() {
+    Resource clusterResource = Resource.newInstance(0, 0);
+
+    // For lhs == rhs
+    Resource lhs = Resource.newInstance(0, 0);
+    Resource rhs = Resource.newInstance(0, 0);
+    assertResourcesOperations(clusterResource, lhs, rhs, false, true, false,
+        true, lhs, lhs);
+
+    // lhs > rhs
+    lhs = Resource.newInstance(1, 1);
+    rhs = Resource.newInstance(0, 0);
+    assertResourcesOperations(clusterResource, lhs, rhs, false, false, true,
+        true, lhs, rhs);
+
+    // For lhs < rhs
+    lhs = Resource.newInstance(0, 0);
+    rhs = Resource.newInstance(1, 1);
+    assertResourcesOperations(clusterResource, lhs, rhs, true, true, false,
+        false, rhs, lhs);
+
+    if (!(resourceCalculator instanceof DominantResourceCalculator)) {
+      return;
+    }
+
+    // verify for 2 dimensional resources i.e memory and cpu
+    // dominant resource types
+    lhs = Resource.newInstance(1, 0);
+    rhs = Resource.newInstance(0, 1);
+    assertResourcesOperations(clusterResource, lhs, rhs, false, true, false,
+        true, lhs, lhs);
+
+    lhs = Resource.newInstance(0, 1);
+    rhs = Resource.newInstance(1, 0);
+    assertResourcesOperations(clusterResource, lhs, rhs, false, true, false,
+        true, lhs, lhs);
+
+    lhs = Resource.newInstance(1, 1);
+    rhs = Resource.newInstance(1, 0);
+    assertResourcesOperations(clusterResource, lhs, rhs, false, false, true,
+        true, lhs, rhs);
+
+    lhs = Resource.newInstance(0, 1);
+    rhs = Resource.newInstance(1, 1);
+    assertResourcesOperations(clusterResource, lhs, rhs, true, true, false,
+        false, rhs, lhs);
+
+  }
+
+
+  private void assertResourcesOperations(Resource clusterResource,
+      Resource lhs, Resource rhs, boolean lessThan, boolean lessThanOrEqual,
+      boolean greaterThan, boolean greaterThanOrEqual, Resource max,
+      Resource min) {
+
+    Assert.assertEquals("Less Than operation is wrongly calculated.", lessThan,
+        Resources.lessThan(resourceCalculator, clusterResource, lhs, rhs));
+
+    Assert.assertEquals(
+        "Less Than Or Equal To operation is wrongly calculated.",
+        lessThanOrEqual, Resources.lessThanOrEqual(resourceCalculator,
+            clusterResource, lhs, rhs));
+
+    Assert.assertEquals("Greater Than operation is wrongly calculated.",
+        greaterThan,
+        Resources.greaterThan(resourceCalculator, clusterResource, lhs, rhs));
+
+    Assert.assertEquals(
+        "Greater Than Or Equal To operation is wrongly calculated.",
+        greaterThanOrEqual, Resources.greaterThanOrEqual(resourceCalculator,
+            clusterResource, lhs, rhs));
+
+    Assert.assertEquals("Max(value) Operation wrongly calculated.", max,
+        Resources.max(resourceCalculator, clusterResource, lhs, rhs));
+
+    Assert.assertEquals("Min(value) operation is wrongly calculated.", min,
+        Resources.min(resourceCalculator, clusterResource, lhs, rhs));
+  }
+
+}
\ No newline at end of file
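
For readers unfamiliar with the two calculators this test is parameterized over: DefaultResourceCalculator orders resources by memory alone, while DominantResourceCalculator orders them by their dominant share of the cluster. A hedged usage sketch of the Resources comparison helpers the test drives (the cluster size and requests below are invented for illustration):

    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
    import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
    import org.apache.hadoop.yarn.util.resource.Resources;

    public class CalculatorSketch {
      public static void main(String[] args) {
        ResourceCalculator rc = new DominantResourceCalculator();
        Resource cluster = Resource.newInstance(100 * 1024, 100);
        Resource a = Resource.newInstance(8 * 1024, 1); // memory-dominant: 8%
        Resource b = Resource.newInstance(1024, 8);     // cpu-dominant: 8%
        // Equal dominant shares, so the two requests compare as equal.
        System.out.println(Resources.lessThan(rc, cluster, a, b));           // false
        System.out.println(Resources.greaterThanOrEqual(rc, cluster, a, b)); // true
      }
    }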


[50/50] hadoop git commit: YARN-3780. Should use equals when compare Resource in RMNodeImpl#ReconnectNodeTransition. Contributed by zhihai xu.

Posted by zj...@apache.org.
YARN-3780. Should use equals when compare Resource in
RMNodeImpl#ReconnectNodeTransition. Contributed by zhihai xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3ecbac8a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3ecbac8a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3ecbac8a

Branch: refs/heads/YARN-2928
Commit: 3ecbac8aad1d1ea88685f4b915a2605617b21b4e
Parents: 0de32f7
Author: Devaraj K <de...@apache.org>
Authored: Mon Jun 8 11:54:55 2015 +0530
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:57:02 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                                   | 3 +++
 .../hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java     | 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ecbac8a/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 67a705c..da4f3b2 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -592,6 +592,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3655. FairScheduler: potential livelock due to maxAMShare limitation
     and container reservation. (Zhihai Xu via kasha)
 
+    YARN-3780. Should use equals when compare Resource in RMNodeImpl#ReconnectNodeTransition.
+    (zhihai xu via devaraj)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ecbac8a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
index 1263692..8a810cb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
@@ -622,7 +622,8 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
         rmNode.httpPort = newNode.getHttpPort();
         rmNode.httpAddress = newNode.getHttpAddress();
         boolean isCapabilityChanged = false;
-        if (rmNode.getTotalCapability() != newNode.getTotalCapability()) {
+        if (!rmNode.getTotalCapability().equals(
+            newNode.getTotalCapability())) {
           rmNode.totalCapability = newNode.getTotalCapability();
           isCapabilityChanged = true;
         }
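
The bug fixed above is reference-versus-value equality: two Resource records can describe the same capability while being distinct objects, so comparing them with != reports a spurious capability change on every NodeManager reconnect. A self-contained illustration:

    import org.apache.hadoop.yarn.api.records.Resource;

    public class EqualsVsIdentity {
      public static void main(String[] args) {
        Resource a = Resource.newInstance(4096, 4);
        Resource b = Resource.newInstance(4096, 4);
        System.out.println(a != b);      // true: different objects
        System.out.println(a.equals(b)); // true: same memory and vcores
        // "Capability changed" must therefore be decided with
        // !a.equals(b), as the patch above now does.
      }
    }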


[14/50] hadoop git commit: YARN-3762. FairScheduler: CME on FSParentQueue#getQueueUserAclInfo. (kasha)

Posted by zj...@apache.org.
YARN-3762. FairScheduler: CME on FSParentQueue#getQueueUserAclInfo. (kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9e8f791
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9e8f791
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9e8f791

Branch: refs/heads/YARN-2928
Commit: b9e8f791333fcebd79e96b3ccb8f998572aecaa1
Parents: 95dd42b
Author: Karthik Kambatla <ka...@apache.org>
Authored: Wed Jun 3 13:47:24 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Jun 8 09:43:14 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |   2 +
 .../scheduler/fair/FSParentQueue.java           | 219 ++++++++++++++-----
 .../scheduler/fair/QueueManager.java            |   3 +-
 3 files changed, 164 insertions(+), 60 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9e8f791/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1841d80..fb9badc 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -573,6 +573,8 @@ Release 2.8.0 - UNRELEASED
     YARN-3751. Fixed AppInfo to check if used resources are null. (Sunil G via
     zjshen)
 
+    YARN-3762. FairScheduler: CME on FSParentQueue#getQueueUserAclInfo. (kasha)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
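
The ConcurrentModificationException behind YARN-3762 came from one thread iterating childQueues while another mutated it. The fix, shown in the FSParentQueue diff below, guards the list with a ReentrantReadWriteLock: mutation and sorting take the write lock, traversal takes the read lock, and getDemand/getChildQueues hand out defensive views. The core idiom in isolation, as a hedged sketch:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.locks.Lock;
    import java.util.concurrent.locks.ReadWriteLock;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class GuardedList<T> {
      private final List<T> items = new ArrayList<T>();
      private final ReadWriteLock rwLock = new ReentrantReadWriteLock();
      private final Lock readLock = rwLock.readLock();
      private final Lock writeLock = rwLock.writeLock();

      /** Mutations take the exclusive write lock. */
      public void add(T item) {
        writeLock.lock();
        try {
          items.add(item);
        } finally {
          writeLock.unlock();
        }
      }

      /** Readers share the read lock and never overlap a writer. */
      public List<T> snapshotCopy() {
        readLock.lock();
        try {
          // Copy under the lock so callers can iterate lock-free;
          // the patch itself returns an unmodifiable view instead.
          return new ArrayList<T>(items);
        } finally {
          readLock.unlock();
        }
      }
    }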

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9e8f791/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
index f74106a..7d2e5b8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
@@ -23,6 +23,9 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -44,36 +47,64 @@ public class FSParentQueue extends FSQueue {
   private static final Log LOG = LogFactory.getLog(
       FSParentQueue.class.getName());
 
-  private final List<FSQueue> childQueues = 
-      new ArrayList<FSQueue>();
+  private final List<FSQueue> childQueues = new ArrayList<>();
   private Resource demand = Resources.createResource(0);
   private int runnableApps;
-  
+
+  private ReadWriteLock rwLock = new ReentrantReadWriteLock();
+  private Lock readLock = rwLock.readLock();
+  private Lock writeLock = rwLock.writeLock();
+
   public FSParentQueue(String name, FairScheduler scheduler,
       FSParentQueue parent) {
     super(name, scheduler, parent);
   }
   
   public void addChildQueue(FSQueue child) {
-    childQueues.add(child);
+    writeLock.lock();
+    try {
+      childQueues.add(child);
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+  public void removeChildQueue(FSQueue child) {
+    writeLock.lock();
+    try {
+      childQueues.remove(child);
+    } finally {
+      writeLock.unlock();
+    }
   }
 
   @Override
   public void recomputeShares() {
-    policy.computeShares(childQueues, getFairShare());
-    for (FSQueue childQueue : childQueues) {
-      childQueue.getMetrics().setFairShare(childQueue.getFairShare());
-      childQueue.recomputeShares();
+    readLock.lock();
+    try {
+      policy.computeShares(childQueues, getFairShare());
+      for (FSQueue childQueue : childQueues) {
+        childQueue.getMetrics().setFairShare(childQueue.getFairShare());
+        childQueue.recomputeShares();
+      }
+    } finally {
+      readLock.unlock();
     }
   }
 
   public void recomputeSteadyShares() {
-    policy.computeSteadyShares(childQueues, getSteadyFairShare());
-    for (FSQueue childQueue : childQueues) {
-      childQueue.getMetrics().setSteadyFairShare(childQueue.getSteadyFairShare());
-      if (childQueue instanceof FSParentQueue) {
-        ((FSParentQueue) childQueue).recomputeSteadyShares();
+    readLock.lock();
+    try {
+      policy.computeSteadyShares(childQueues, getSteadyFairShare());
+      for (FSQueue childQueue : childQueues) {
+        childQueue.getMetrics()
+            .setSteadyFairShare(childQueue.getSteadyFairShare());
+        if (childQueue instanceof FSParentQueue) {
+          ((FSParentQueue) childQueue).recomputeSteadyShares();
+        }
       }
+    } finally {
+      readLock.unlock();
     }
   }
 
@@ -81,21 +112,37 @@ public class FSParentQueue extends FSQueue {
   public void updatePreemptionVariables() {
     super.updatePreemptionVariables();
     // For child queues
-    for (FSQueue childQueue : childQueues) {
-      childQueue.updatePreemptionVariables();
+
+    readLock.lock();
+    try {
+      for (FSQueue childQueue : childQueues) {
+        childQueue.updatePreemptionVariables();
+      }
+    } finally {
+      readLock.unlock();
     }
   }
 
   @Override
   public Resource getDemand() {
-    return demand;
+    readLock.lock();
+    try {
+      return Resource.newInstance(demand.getMemory(), demand.getVirtualCores());
+    } finally {
+      readLock.unlock();
+    }
   }
 
   @Override
   public Resource getResourceUsage() {
     Resource usage = Resources.createResource(0);
-    for (FSQueue child : childQueues) {
-      Resources.addTo(usage, child.getResourceUsage());
+    readLock.lock();
+    try {
+      for (FSQueue child : childQueues) {
+        Resources.addTo(usage, child.getResourceUsage());
+      }
+    } finally {
+      readLock.unlock();
     }
     return usage;
   }
@@ -106,20 +153,25 @@ public class FSParentQueue extends FSQueue {
     // Limit demand to maxResources
     Resource maxRes = scheduler.getAllocationConfiguration()
         .getMaxResources(getName());
-    demand = Resources.createResource(0);
-    for (FSQueue childQueue : childQueues) {
-      childQueue.updateDemand();
-      Resource toAdd = childQueue.getDemand();
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Counting resource from " + childQueue.getName() + " " + 
-            toAdd + "; Total resource consumption for " + getName() +
-            " now " + demand);
-      }
-      demand = Resources.add(demand, toAdd);
-      demand = Resources.componentwiseMin(demand, maxRes);
-      if (Resources.equals(demand, maxRes)) {
-        break;
+    writeLock.lock();
+    try {
+      demand = Resources.createResource(0);
+      for (FSQueue childQueue : childQueues) {
+        childQueue.updateDemand();
+        Resource toAdd = childQueue.getDemand();
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Counting resource from " + childQueue.getName() + " " +
+              toAdd + "; Total resource consumption for " + getName() +
+              " now " + demand);
+        }
+        demand = Resources.add(demand, toAdd);
+        demand = Resources.componentwiseMin(demand, maxRes);
+        if (Resources.equals(demand, maxRes)) {
+          break;
+        }
       }
+    } finally {
+      writeLock.unlock();
     }
     if (LOG.isDebugEnabled()) {
       LOG.debug("The updated demand for " + getName() + " is " + demand +
@@ -127,33 +179,31 @@ public class FSParentQueue extends FSQueue {
     }    
   }
   
-  private synchronized QueueUserACLInfo getUserAclInfo(
-      UserGroupInformation user) {
-    QueueUserACLInfo userAclInfo = 
-      recordFactory.newRecordInstance(QueueUserACLInfo.class);
-    List<QueueACL> operations = new ArrayList<QueueACL>();
+  private QueueUserACLInfo getUserAclInfo(UserGroupInformation user) {
+    List<QueueACL> operations = new ArrayList<>();
     for (QueueACL operation : QueueACL.values()) {
       if (hasAccess(operation, user)) {
         operations.add(operation);
       } 
     }
-
-    userAclInfo.setQueueName(getQueueName());
-    userAclInfo.setUserAcls(operations);
-    return userAclInfo;
+    return QueueUserACLInfo.newInstance(getQueueName(), operations);
   }
   
   @Override
-  public synchronized List<QueueUserACLInfo> getQueueUserAclInfo(
-      UserGroupInformation user) {
+  public List<QueueUserACLInfo> getQueueUserAclInfo(UserGroupInformation user) {
     List<QueueUserACLInfo> userAcls = new ArrayList<QueueUserACLInfo>();
     
     // Add queue acls
     userAcls.add(getUserAclInfo(user));
     
     // Add children queue acls
-    for (FSQueue child : childQueues) {
-      userAcls.addAll(child.getQueueUserAclInfo(user));
+    readLock.lock();
+    try {
+      for (FSQueue child : childQueues) {
+        userAcls.addAll(child.getQueueUserAclInfo(user));
+      }
+    } finally {
+      readLock.unlock();
     }
  
     return userAcls;
@@ -168,12 +218,32 @@ public class FSParentQueue extends FSQueue {
       return assigned;
     }
 
-    Collections.sort(childQueues, policy.getComparator());
-    for (FSQueue child : childQueues) {
-      assigned = child.assignContainer(node);
-      if (!Resources.equals(assigned, Resources.none())) {
-        break;
+    // Hold the write lock when sorting childQueues
+    writeLock.lock();
+    try {
+      Collections.sort(childQueues, policy.getComparator());
+    } finally {
+      writeLock.unlock();
+    }
+
+    /*
+     * We are releasing the lock between the sort and iteration of the
+     * "sorted" list. There could be changes to the list here:
+     * 1. Add a child queue to the end of the list; this doesn't affect
+     * container assignment.
+     * 2. Remove a child queue; this is probably worth taking care of so we
+     * don't assign to a queue that is going to be removed shortly.
+     */
+    readLock.lock();
+    try {
+      for (FSQueue child : childQueues) {
+        assigned = child.assignContainer(node);
+        if (!Resources.equals(assigned, Resources.none())) {
+          break;
+        }
       }
+    } finally {
+      readLock.unlock();
     }
     return assigned;
   }
@@ -185,11 +255,17 @@ public class FSParentQueue extends FSQueue {
     // Find the childQueue which is most over fair share
     FSQueue candidateQueue = null;
     Comparator<Schedulable> comparator = policy.getComparator();
-    for (FSQueue queue : childQueues) {
-      if (candidateQueue == null ||
-          comparator.compare(queue, candidateQueue) > 0) {
-        candidateQueue = queue;
+
+    readLock.lock();
+    try {
+      for (FSQueue queue : childQueues) {
+        if (candidateQueue == null ||
+            comparator.compare(queue, candidateQueue) > 0) {
+          candidateQueue = queue;
+        }
       }
+    } finally {
+      readLock.unlock();
     }
 
     // Let the selected queue choose which of its container to preempt
@@ -201,7 +277,12 @@ public class FSParentQueue extends FSQueue {
 
   @Override
   public List<FSQueue> getChildQueues() {
-    return childQueues;
+    readLock.lock();
+    try {
+      return Collections.unmodifiableList(childQueues);
+    } finally {
+      readLock.unlock();
+    }
   }
 
   @Override
@@ -218,23 +299,43 @@ public class FSParentQueue extends FSQueue {
   }
   
   public void incrementRunnableApps() {
-    runnableApps++;
+    writeLock.lock();
+    try {
+      runnableApps++;
+    } finally {
+      writeLock.unlock();
+    }
   }
   
   public void decrementRunnableApps() {
-    runnableApps--;
+    writeLock.lock();
+    try {
+      runnableApps--;
+    } finally {
+      writeLock.unlock();
+    }
   }
 
   @Override
   public int getNumRunnableApps() {
-    return runnableApps;
+    readLock.lock();
+    try {
+      return runnableApps;
+    } finally {
+      readLock.unlock();
+    }
   }
 
   @Override
   public void collectSchedulerApplications(
       Collection<ApplicationAttemptId> apps) {
-    for (FSQueue childQueue : childQueues) {
-      childQueue.collectSchedulerApplications(apps);
+    readLock.lock();
+    try {
+      for (FSQueue childQueue : childQueues) {
+        childQueue.collectSchedulerApplications(apps);
+      }
+    } finally {
+      readLock.unlock();
     }
   }
   

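The hunks above all follow the same discipline: reads of shared state (childQueues, demand, runnableApps) happen under a read lock, mutations happen under a write lock, and getters hand out defensive copies or unmodifiable views instead of live references. Below is a minimal, self-contained Java sketch of that pattern; the class and method bodies are illustrative only, not the real FSParentQueue, and it assumes readLock/writeLock are the two sides of a single ReentrantReadWriteLock, as an earlier (unshown) hunk of this patch presumably declares them.

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import java.util.concurrent.locks.Lock;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Illustrative stand-in for FSParentQueue's locking, not the real class.
    public class LockedParent {
      private final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
      private final Lock readLock = rwLock.readLock();
      private final Lock writeLock = rwLock.writeLock();

      private final List<String> childQueues = new ArrayList<>();
      private int runnableApps = 0;

      public void addChildQueue(String name) {
        writeLock.lock();          // mutations take the exclusive lock
        try {
          childQueues.add(name);
        } finally {
          writeLock.unlock();
        }
      }

      // Mirrors the new getChildQueues(): callers get a read-only view,
      // so all writes are forced through methods holding the write lock.
      public List<String> getChildQueues() {
        readLock.lock();
        try {
          return Collections.unmodifiableList(childQueues);
        } finally {
          readLock.unlock();
        }
      }

      public void incrementRunnableApps() {
        writeLock.lock();
        try {
          runnableApps++;
        } finally {
          writeLock.unlock();
        }
      }

      public int getNumRunnableApps() {
        readLock.lock();           // concurrent readers share this lock
        try {
          return runnableApps;
        } finally {
          readLock.unlock();
        }
      }
    }

Note that getDemand() in the patch goes one step further and returns a copy (Resource.newInstance(...)) rather than a view, since the demand field is reassigned under the write lock in updateDemand() and a live reference could otherwise be observed mid-update.
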
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9e8f791/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java
index 64442ab..6556717 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java
@@ -304,7 +304,8 @@ public class QueueManager {
       }
     }
     queues.remove(queue.getName());
-    queue.getParent().getChildQueues().remove(queue);
+    FSParentQueue parent = queue.getParent();
+    parent.removeChildQueue(queue);
   }
   
   /**
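The QueueManager change above is the flip side of the new locking: removeQueue used to reach into the parent's live child list via getChildQueues().remove(queue), which would now fail against the unmodifiable view (and would bypass the write lock anyway). Routing the removal through the parent keeps every mutation of childQueues behind the write lock. The removeChildQueue method itself is added in a part of this patch not shown here; a plausible sketch, assuming the same readLock/writeLock fields as above:

      public void removeChildQueue(FSQueue child) {
        writeLock.lock();   // mutate childQueues only under the write lock
        try {
          childQueues.remove(child);
        } finally {
          writeLock.unlock();
        }
      }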