You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by ar...@apache.org on 2016/03/09 18:45:01 UTC

[01/34] hadoop git commit: HADOOP-12717. NPE when trying to rename a directory in Windows Azure Storage FileSystem. Contributed by Robert Yokota and Gaurav Kanade.

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-1312 78d7ee492 -> e7fd4de8b


HADOOP-12717. NPE when trying to rename a directory in Windows Azure Storage FileSystem. Contributed by Robert Yokota and Gaurav Kanade.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c50aad0f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c50aad0f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c50aad0f

Branch: refs/heads/HDFS-1312
Commit: c50aad0f854b74ede9668e35db314b0a93be81b2
Parents: 2759689
Author: Chris Nauroth <cn...@apache.org>
Authored: Fri Mar 4 22:57:43 2016 -0800
Committer: Chris Nauroth <cn...@apache.org>
Committed: Fri Mar 4 22:57:43 2016 -0800

----------------------------------------------------------------------
 .../fs/azure/AzureNativeFileSystemStore.java    |  4 +-
 ...ativeAzureFileSystemAtomicRenameDirList.java | 49 ++++++++++++++++++++
 2 files changed, 52 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c50aad0f/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index 7ccefd6..e261c4d 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -450,7 +450,9 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
       // Add to this the hbase root directory, or /hbase is that is not set.
       hbaseRoot = verifyAndConvertToStandardFormat(
           sessionConfiguration.get("hbase.rootdir", "hbase"));
-      atomicRenameDirs.add(hbaseRoot);
+      if (hbaseRoot != null) {
+        atomicRenameDirs.add(hbaseRoot);
+      }
     } catch (URISyntaxException e) {
       LOG.warn("Unable to initialize HBase root as an atomic rename directory.");
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c50aad0f/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAtomicRenameDirList.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAtomicRenameDirList.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAtomicRenameDirList.java
new file mode 100644
index 0000000..b9cca25
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAtomicRenameDirList.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.io.IOException;
+import java.net.URI;
+
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+public class TestNativeAzureFileSystemAtomicRenameDirList extends NativeAzureFileSystemBaseTest {
+  private AzureBlobStorageTestAccount testAccount;
+
+  // HBase-site config controlling HBase root dir
+  private static final String HBASE_ROOT_DIR_CONF_STRING = "hbase.rootdir";
+  private static final String HBASE_ROOT_DIR_VALUE_ON_DIFFERENT_FS = "wasb://somedifferentfilesystem.blob.core.windows.net/hbase";
+  @Override
+  protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+    testAccount = AzureBlobStorageTestAccount.create();
+    return testAccount;
+  }
+
+  @Test
+  public void testAzureNativeStoreIsAtomicRenameKeyDoesNotThrowNPEOnInitializingWithNonDefaultURI () throws IOException {
+    NativeAzureFileSystem azureFs = (NativeAzureFileSystem)fs;
+    AzureNativeFileSystemStore azureStore = azureFs.getStore();
+    Configuration conf = fs.getConf();
+    conf.set(HBASE_ROOT_DIR_CONF_STRING, HBASE_ROOT_DIR_VALUE_ON_DIFFERENT_FS);
+    URI uri = fs.getUri();
+    fs.initialize(uri, conf);
+    azureStore.isAtomicRenameKey("anyrandomkey");
+  }
+}


[33/34] hadoop git commit: HDFS-9702. DiskBalancer: getVolumeMap implementation. (Contributed by Anu Engineer)

Posted by ar...@apache.org.
HDFS-9702. DiskBalancer: getVolumeMap implementation. (Contributed by Anu Engineer)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6fc218b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6fc218b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6fc218b3

Branch: refs/heads/HDFS-1312
Commit: 6fc218b32e9ead381ebf9fada8120fcc5b174da5
Parents: 78d7ee4
Author: Arpit Agarwal <ar...@apache.org>
Authored: Wed Mar 9 09:44:22 2016 -0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Wed Mar 9 09:44:22 2016 -0800

----------------------------------------------------------------------
 .../hadoop/hdfs/server/datanode/DataNode.java   | 17 +++--
 .../hdfs/server/datanode/DiskBalancer.java      | 26 ++++++++
 .../diskbalancer/DiskBalancerException.java     |  3 +-
 .../diskbalancer/TestDiskBalancerRPC.java       | 66 ++++++++++++++++++++
 4 files changed, 107 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fc218b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index fb86159..6c08b2c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -169,6 +169,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 import org.apache.hadoop.hdfs.server.datanode.web.DatanodeHttpServer;
+import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerConstants;
 import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
@@ -3339,8 +3340,8 @@ public class DataNode extends ReconfigurableBase
   }
 
   /**
-   * Gets a run-time configuration value from running diskbalancer instance. For
-   * example : Disk Balancer bandwidth of a running instance.
+   * Gets a runtime configuration value from  diskbalancer instance. For
+   * example : DiskBalancer bandwidth.
    *
    * @param key - String that represents the run time key value.
    * @return value of the key as a string.
@@ -3349,7 +3350,15 @@ public class DataNode extends ReconfigurableBase
   @Override
   public String getDiskBalancerSetting(String key) throws IOException {
     checkSuperuserPrivilege();
-    throw new DiskBalancerException("Not Implemented",
-        DiskBalancerException.Result.INTERNAL_ERROR);
+    Preconditions.checkNotNull(key);
+    switch (key) {
+    case DiskBalancerConstants.DISKBALANCER_VOLUME_NAME:
+      return this.diskBalancer.getVolumeNames();
+    default:
+      LOG.error("Disk Balancer - Unknown key in get balancer setting. Key: " +
+          key);
+      throw new DiskBalancerException("Unknown key",
+          DiskBalancerException.Result.UNKNOWN_KEY);
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fc218b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
index d5c402e..9e41d2e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.Step;
 import org.apache.hadoop.util.Time;
+import org.codehaus.jackson.map.ObjectMapper;
 
 import java.io.IOException;
 import java.nio.charset.Charset;
@@ -221,6 +222,31 @@ public class DiskBalancer {
     }
   }
 
+  /**
+   * Returns a volume ID to Volume base path map.
+   *
+   * @return Json string of the volume map.
+   * @throws DiskBalancerException
+   */
+  public String getVolumeNames() throws DiskBalancerException {
+    lock.lock();
+    try {
+      checkDiskBalancerEnabled();
+      Map<String, String> pathMap = new HashMap<>();
+      Map<String, FsVolumeSpi> volMap = getStorageIDToVolumeMap();
+      for (Map.Entry<String, FsVolumeSpi> entry : volMap.entrySet()) {
+        pathMap.put(entry.getKey(), entry.getValue().getBasePath());
+      }
+      ObjectMapper mapper = new ObjectMapper();
+      return mapper.writeValueAsString(pathMap);
+    } catch (IOException e) {
+      throw new DiskBalancerException("Internal error, Unable to " +
+          "create JSON string.", e,
+          DiskBalancerException.Result.INTERNAL_ERROR);
+    } finally {
+      lock.unlock();
+    }
+  }
 
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fc218b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
index 00fe53d..38455a7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
@@ -36,7 +36,8 @@ public class DiskBalancerException extends IOException {
     INVALID_VOLUME,
     INVALID_MOVE,
     INTERNAL_ERROR,
-    NO_SUCH_PLAN
+    NO_SUCH_PLAN,
+    UNKNOWN_KEY
   }
 
   private final Result result;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fc218b3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerRPC.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerRPC.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerRPC.java
index e29b3b7..37a6216 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerRPC.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerRPC.java
@@ -24,18 +24,24 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus;
+import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException.*;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
+import org.hamcrest.*;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
+import org.codehaus.jackson.map.ObjectMapper;
+
+import java.util.HashMap;
+import java.util.Map;
 
 import static org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus.Result.NO_PLAN;
 import static org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus.Result.PLAN_DONE;
@@ -84,6 +90,8 @@ public class TestDiskBalancerRPC {
     int planVersion = rpcTestHelper.getPlanVersion();
     NodePlan plan = rpcTestHelper.getPlan();
     thrown.expect(DiskBalancerException.class);
+    thrown.expect(new
+        ResultVerifier(Result.INVALID_PLAN_HASH));
     dataNode.submitDiskBalancerPlan(planHash, planVersion, 10, plan.toJson());
   }
 
@@ -96,6 +104,8 @@ public class TestDiskBalancerRPC {
     planVersion++;
     NodePlan plan = rpcTestHelper.getPlan();
     thrown.expect(DiskBalancerException.class);
+    thrown.expect(new
+        ResultVerifier(Result.INVALID_PLAN_VERSION));
     dataNode.submitDiskBalancerPlan(planHash, planVersion, 10, plan.toJson());
   }
 
@@ -107,6 +117,8 @@ public class TestDiskBalancerRPC {
     int planVersion = rpcTestHelper.getPlanVersion();
     NodePlan plan = rpcTestHelper.getPlan();
     thrown.expect(DiskBalancerException.class);
+    thrown.expect(new
+        ResultVerifier(Result.INVALID_PLAN));
     dataNode.submitDiskBalancerPlan(planHash, planVersion, 10, "");
   }
 
@@ -131,6 +143,8 @@ public class TestDiskBalancerRPC {
     planHash = String.valueOf(hashArray);
     NodePlan plan = rpcTestHelper.getPlan();
     thrown.expect(DiskBalancerException.class);
+    thrown.expect(new
+        ResultVerifier(Result.NO_SUCH_PLAN));
     dataNode.cancelDiskBalancePlan(planHash);
   }
 
@@ -141,9 +155,38 @@ public class TestDiskBalancerRPC {
     String planHash = "";
     NodePlan plan = rpcTestHelper.getPlan();
     thrown.expect(DiskBalancerException.class);
+    thrown.expect(new
+        ResultVerifier(Result.NO_SUCH_PLAN));
     dataNode.cancelDiskBalancePlan(planHash);
   }
 
+  @Test
+  public void testGetDiskBalancerVolumeMapping() throws Exception {
+    final int dnIndex = 0;
+    DataNode dataNode = cluster.getDataNodes().get(dnIndex);
+    String volumeNameJson = dataNode.getDiskBalancerSetting(
+        DiskBalancerConstants.DISKBALANCER_VOLUME_NAME);
+    Assert.assertNotNull(volumeNameJson);
+    ObjectMapper mapper = new ObjectMapper();
+
+    @SuppressWarnings("unchecked")
+    Map<String, String> volumemap =
+        mapper.readValue(volumeNameJson, HashMap.class);
+
+    Assert.assertEquals(2, volumemap.size());
+  }
+
+  @Test
+  public void testGetDiskBalancerInvalidSetting() throws Exception {
+    final int dnIndex = 0;
+    final String invalidSetting = "invalidSetting";
+    DataNode dataNode = cluster.getDataNodes().get(dnIndex);
+    thrown.expect(DiskBalancerException.class);
+    thrown.expect(new
+        ResultVerifier(Result.UNKNOWN_KEY));
+    dataNode.getDiskBalancerSetting(invalidSetting);
+  }
+
 
   @Test
   public void testQueryPlan() throws Exception {
@@ -173,6 +216,8 @@ public class TestDiskBalancerRPC {
     final int dnIndex = 0;
     DataNode dataNode = cluster.getDataNodes().get(dnIndex);
     thrown.expect(DiskBalancerException.class);
+    thrown.expect(new
+        ResultVerifier(Result.UNKNOWN_KEY));
     dataNode.getDiskBalancerSetting(
         DiskBalancerConstants.DISKBALANCER_BANDWIDTH);
   }
@@ -223,4 +268,25 @@ public class TestDiskBalancerRPC {
       return this;
     }
   }
+
+  private class ResultVerifier
+      extends TypeSafeMatcher<DiskBalancerException> {
+    private final DiskBalancerException.Result expectedResult;
+
+    ResultVerifier(DiskBalancerException.Result expectedResult){
+      this.expectedResult = expectedResult;
+    }
+
+    @Override
+    protected boolean matchesSafely(DiskBalancerException exception) {
+      return (this.expectedResult == exception.getResult());
+    }
+
+    @Override
+    public void describeTo(Description description) {
+      description.appendText("expects Result: ")
+          .appendValue(this.expectedResult);
+
+    }
+  }
 }


[18/34] hadoop git commit: HDFS-8786. Erasure coding: use simple replication for internal blocks on decommissioning datanodes. Contributed by Rakesh R.

Posted by ar...@apache.org.
HDFS-8786. Erasure coding: use simple replication for internal blocks on decommissioning datanodes. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/743a99f2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/743a99f2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/743a99f2

Branch: refs/heads/HDFS-1312
Commit: 743a99f2dbc9a27e19f92ff3551937d90dba2e89
Parents: f86850b
Author: Jing Zhao <ji...@apache.org>
Authored: Tue Mar 8 10:24:57 2016 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Tue Mar 8 10:24:57 2016 -0800

----------------------------------------------------------------------
 .../server/blockmanagement/BlockManager.java    |  14 +-
 .../BlockReconstructionWork.java                |  19 +-
 .../blockmanagement/ErasureCodingWork.java      |  67 ++-
 .../server/blockmanagement/ReplicationWork.java |   2 +-
 .../hdfs/TestDecommissionWithStriped.java       | 473 +++++++++++++++++++
 5 files changed, 547 insertions(+), 28 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/743a99f2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 4123654..f12ea1b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1610,7 +1610,7 @@ public class BlockManager implements BlockStatsMXBean {
       return null;
     }
 
-    final int additionalReplRequired;
+    int additionalReplRequired;
     if (numReplicas.liveReplicas() < requiredReplication) {
       additionalReplRequired = requiredReplication - numReplicas.liveReplicas()
           - pendingNum;
@@ -1624,6 +1624,13 @@ public class BlockManager implements BlockStatsMXBean {
         // Wait the previous reconstruction to finish.
         return null;
       }
+
+      // should reconstruct all the internal blocks before scheduling
+      // replication task for decommissioning node(s).
+      if (additionalReplRequired - numReplicas.decommissioning() > 0) {
+        additionalReplRequired = additionalReplRequired
+            - numReplicas.decommissioning();
+      }
       byte[] indices = new byte[liveBlockIndices.size()];
       for (int i = 0 ; i < liveBlockIndices.size(); i++) {
         indices[i] = liveBlockIndices.get(i);
@@ -1679,10 +1686,13 @@ public class BlockManager implements BlockStatsMXBean {
         // No use continuing, unless a new rack in this case
         return false;
       }
+      // mark that the reconstruction work is to replicate internal block to a
+      // new rack.
+      rw.setNotEnoughRack();
     }
 
     // Add block to the datanode's task list
-    rw.addTaskToDatanode();
+    rw.addTaskToDatanode(numReplicas);
     DatanodeStorageInfo.incrementBlocksScheduled(targets);
 
     // Move the block-replication into a "pending" state.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/743a99f2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java
index c1998ee..57121bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java
@@ -47,6 +47,7 @@ abstract class BlockReconstructionWork {
 
   private DatanodeStorageInfo[] targets;
   private final int priority;
+  private boolean notEnoughRack = false;
 
   public BlockReconstructionWork(BlockInfo block,
       BlockCollection bc,
@@ -105,12 +106,26 @@ abstract class BlockReconstructionWork {
     return additionalReplRequired;
   }
 
+  /**
+   * Mark that the reconstruction work is to replicate internal block to a new
+   * rack.
+   */
+  void setNotEnoughRack() {
+    notEnoughRack = true;
+  }
+
+  boolean hasNotEnoughRack() {
+    return notEnoughRack;
+  }
+
   abstract void chooseTargets(BlockPlacementPolicy blockplacement,
       BlockStoragePolicySuite storagePolicySuite,
       Set<Node> excludedNodes);
 
   /**
-   * add reconstruction task into a source datanode
+   * Add reconstruction task into a source datanode.
+   *
+   * @param numberReplicas replica details
    */
-  abstract void addTaskToDatanode();
+  abstract void addTaskToDatanode(NumberReplicas numberReplicas);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/743a99f2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java
index 7877c56..d110b30 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java
@@ -23,7 +23,6 @@ import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.net.Node;
 
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.BitSet;
 import java.util.HashMap;
 import java.util.List;
@@ -121,33 +120,55 @@ class ErasureCodingWork extends BlockReconstructionWork {
   }
 
   @Override
-  void addTaskToDatanode() {
-    assert getTargets().length > 0;
+  void addTaskToDatanode(NumberReplicas numberReplicas) {
+    final DatanodeStorageInfo[] targets = getTargets();
+    assert targets.length > 0;
     BlockInfoStriped stripedBlk = (BlockInfoStriped) getBlock();
 
-    // if we already have all the internal blocks, but not enough racks,
-    // we only need to replicate one internal block to a new rack
-    if (hasAllInternalBlocks()) {
+    if (hasNotEnoughRack()) {
+      // if we already have all the internal blocks, but not enough racks,
+      // we only need to replicate one internal block to a new rack
       int sourceIndex = chooseSource4SimpleReplication();
-      final byte blockIndex = liveBlockIndicies[sourceIndex];
-      final DatanodeDescriptor source = getSrcNodes()[sourceIndex];
-      final long internBlkLen = StripedBlockUtil.getInternalBlockLength(
-          stripedBlk.getNumBytes(), stripedBlk.getCellSize(),
-          stripedBlk.getDataBlockNum(), blockIndex);
-      final Block targetBlk = new Block(
-          stripedBlk.getBlockId() + blockIndex, internBlkLen,
-          stripedBlk.getGenerationStamp());
-      source.addBlockToBeReplicated(targetBlk, getTargets());
-      if (BlockManager.LOG.isDebugEnabled()) {
-        BlockManager.LOG.debug("Add replication task from source {} to " +
-            "targets {} for EC block {}", source, Arrays.toString(getTargets()),
-            targetBlk);
+      createReplicationWork(sourceIndex, targets[0]);
+    } else if (numberReplicas.decommissioning() > 0 && hasAllInternalBlocks()) {
+      List<Integer> decommissioningSources = findDecommissioningSources();
+      // decommissioningSources.size() should be >= targets.length
+      final int num = Math.min(decommissioningSources.size(), targets.length);
+      for (int i = 0; i < num; i++) {
+        createReplicationWork(decommissioningSources.get(i), targets[i]);
       }
     } else {
-      getTargets()[0].getDatanodeDescriptor().addBlockToBeErasureCoded(
-          new ExtendedBlock(blockPoolId, stripedBlk),
-          getSrcNodes(), getTargets(), getLiveBlockIndicies(),
-          stripedBlk.getErasureCodingPolicy());
+      targets[0].getDatanodeDescriptor().addBlockToBeErasureCoded(
+          new ExtendedBlock(blockPoolId, stripedBlk), getSrcNodes(), targets,
+          getLiveBlockIndicies(), stripedBlk.getErasureCodingPolicy());
     }
   }
+
+  private void createReplicationWork(int sourceIndex,
+      DatanodeStorageInfo target) {
+    BlockInfoStriped stripedBlk = (BlockInfoStriped) getBlock();
+    final byte blockIndex = liveBlockIndicies[sourceIndex];
+    final DatanodeDescriptor source = getSrcNodes()[sourceIndex];
+    final long internBlkLen = StripedBlockUtil.getInternalBlockLength(
+        stripedBlk.getNumBytes(), stripedBlk.getCellSize(),
+        stripedBlk.getDataBlockNum(), blockIndex);
+    final Block targetBlk = new Block(stripedBlk.getBlockId() + blockIndex,
+        internBlkLen, stripedBlk.getGenerationStamp());
+    source.addBlockToBeReplicated(targetBlk,
+        new DatanodeStorageInfo[] {target});
+    if (BlockManager.LOG.isDebugEnabled()) {
+      BlockManager.LOG.debug("Add replication task from source {} to "
+          + "target {} for EC block {}", source, target, targetBlk);
+    }
+  }
+
+  private List<Integer> findDecommissioningSources() {
+    List<Integer> srcIndices = new ArrayList<>();
+    for (int i = 0; i < getSrcNodes().length; i++) {
+      if (getSrcNodes()[i].isDecommissionInProgress()) {
+        srcIndices.add(i);
+      }
+    }
+    return srcIndices;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/743a99f2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
index 24601a2..f4d274a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
@@ -55,7 +55,7 @@ class ReplicationWork extends BlockReconstructionWork {
   }
 
   @Override
-  void addTaskToDatanode() {
+  void addTaskToDatanode(NumberReplicas numberReplicas) {
     getSrcNodes()[0].addBlockToBeReplicated(getBlock(), getTargets());
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/743a99f2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
new file mode 100644
index 0000000..bde2ceb
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
@@ -0,0 +1,473 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import static org.apache.hadoop.hdfs.StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
+import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_DATA_BLOCKS;
+import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_PARITY_BLOCKS;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.test.PathUtils;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class tests the decommissioning of datanode with striped blocks.
+ */
+public class TestDecommissionWithStriped {
+  private static final Logger LOG = LoggerFactory
+      .getLogger(TestDecommissionWithStriped.class);
+
+  // heartbeat interval in seconds
+  private static final int HEARTBEAT_INTERVAL = 1;
+  // block report in msec
+  private static final int BLOCKREPORT_INTERVAL_MSEC = 1000;
+  // replication interval
+  private static final int NAMENODE_REPLICATION_INTERVAL = 1;
+
+  private Path decommissionDir;
+  private Path hostsFile;
+  private Path excludeFile;
+  private FileSystem localFileSys;
+
+  private Configuration conf;
+  private MiniDFSCluster cluster;
+  private DistributedFileSystem dfs;
+  private int numDNs;
+  private final int blockSize = StripedFileTestUtil.blockSize;
+  private final int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
+  private int dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
+  private final Path ecDir = new Path("/" + this.getClass().getSimpleName());
+
+  private FSNamesystem fsn;
+  private BlockManager bm;
+  private DFSClient client;
+
+  @Before
+  public void setup() throws IOException {
+    conf = new HdfsConfiguration();
+
+    // Set up the hosts/exclude files.
+    localFileSys = FileSystem.getLocal(conf);
+    Path workingDir = localFileSys.getWorkingDirectory();
+    decommissionDir = new Path(workingDir,
+        PathUtils.getTestDirName(getClass()) + "/work-dir/decommission");
+    hostsFile = new Path(decommissionDir, "hosts");
+    excludeFile = new Path(decommissionDir, "exclude");
+    writeConfigFile(hostsFile, null);
+    writeConfigFile(excludeFile, null);
+
+    // Setup conf
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
+        false);
+    conf.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
+    conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
+        2000);
+    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,
+        BLOCKREPORT_INTERVAL_MSEC);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
+        4);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
+        NAMENODE_REPLICATION_INTERVAL);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
+    conf.setInt(
+        DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY,
+        StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE - 1);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
+        false);
+
+    numDNs = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 2;
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
+    cluster.waitActive();
+    dfs = cluster.getFileSystem(0);
+    fsn = cluster.getNamesystem();
+    bm = fsn.getBlockManager();
+    client = getDfsClient(cluster.getNameNode(0), conf);
+
+    dfs.mkdirs(ecDir);
+    dfs.setErasureCodingPolicy(ecDir, null);
+  }
+
+  @After
+  public void teardown() throws IOException {
+    cleanupFile(localFileSys, decommissionDir);
+    if (cluster != null) {
+      cluster.shutdown();
+      cluster = null;
+    }
+  }
+
+  @Test(timeout = 120000)
+  public void testFileFullBlockGroup() throws Exception {
+    LOG.info("Starting test testFileFullBlockGroup");
+    testDecommission(blockSize * dataBlocks, 9, 1, "testFileFullBlockGroup");
+  }
+
+  @Test(timeout = 120000)
+  public void testFileSmallerThanOneCell() throws Exception {
+    LOG.info("Starting test testFileSmallerThanOneCell");
+    testDecommission(cellSize - 1, 4, 1, "testFileSmallerThanOneCell");
+  }
+
+  @Test(timeout = 120000)
+  public void testFileSmallerThanOneStripe() throws Exception {
+    LOG.info("Starting test testFileSmallerThanOneStripe");
+    testDecommission(cellSize * 2, 5, 1, "testFileSmallerThanOneStripe");
+  }
+
+  @Test(timeout = 120000)
+  public void testDecommissionTwoNodes() throws Exception {
+    LOG.info("Starting test testDecommissionTwoNodes");
+    testDecommission(blockSize * dataBlocks, 9, 2, "testDecommissionTwoNodes");
+  }
+
+  @Test(timeout = 120000)
+  public void testDecommissionWithURBlockForSameBlockGroup() throws Exception {
+    LOG.info("Starting test testDecommissionWithURBlocksForSameBlockGroup");
+
+    final Path ecFile = new Path(ecDir, "testDecommissionWithCorruptBlocks");
+    int writeBytes = BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS * 2;
+    writeStripedFile(dfs, ecFile, writeBytes);
+    Assert.assertEquals(0, bm.numOfUnderReplicatedBlocks());
+
+    final List<DatanodeInfo> decommisionNodes = new ArrayList<DatanodeInfo>();
+    LocatedBlock lb = dfs.getClient().getLocatedBlocks(ecFile.toString(), 0)
+        .get(0);
+    DatanodeInfo[] dnLocs = lb.getLocations();
+    assertEquals(NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS, dnLocs.length);
+    int decommNodeIndex = NUM_DATA_BLOCKS - 1;
+    int stopNodeIndex = 1;
+
+    // add the nodes which will be decommissioning
+    decommisionNodes.add(dnLocs[decommNodeIndex]);
+
+    // stop excess dns to avoid immediate reconstruction.
+    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
+    List<DataNodeProperties> stoppedDns = new ArrayList<>();
+    for (DatanodeInfo liveDn : info) {
+      boolean usedNode = false;
+      for (DatanodeInfo datanodeInfo : dnLocs) {
+        if (liveDn.getXferAddr().equals(datanodeInfo.getXferAddr())) {
+          usedNode = true;
+          break;
+        }
+      }
+      if (!usedNode) {
+        DataNode dn = cluster.getDataNode(liveDn.getIpcPort());
+        stoppedDns.add(cluster.stopDataNode(liveDn.getXferAddr()));
+        cluster.setDataNodeDead(dn.getDatanodeId());
+        LOG.info("stop datanode " + dn.getDatanodeId().getHostName());
+      }
+    }
+    DataNode dn = cluster.getDataNode(dnLocs[stopNodeIndex].getIpcPort());
+    cluster.stopDataNode(dnLocs[stopNodeIndex].getXferAddr());
+    cluster.setDataNodeDead(dn.getDatanodeId());
+    numDNs = numDNs - 1;
+
+    // Decommission node in a new thread. Verify that node is decommissioned.
+    final CountDownLatch decomStarted = new CountDownLatch(0);
+    Thread decomTh = new Thread() {
+      public void run() {
+        try {
+          decomStarted.countDown();
+          decommissionNode(0, decommisionNodes, AdminStates.DECOMMISSIONED);
+        } catch (Exception e) {
+          LOG.error("Exception while decommissioning", e);
+          Assert.fail("Shouldn't throw exception!");
+        }
+      };
+    };
+    int deadDecomissioned = fsn.getNumDecomDeadDataNodes();
+    int liveDecomissioned = fsn.getNumDecomLiveDataNodes();
+    decomTh.start();
+    decomStarted.await(5, TimeUnit.SECONDS);
+    Thread.sleep(3000); // grace period to trigger decommissioning call
+    // start datanode so that decommissioning live node will be finished
+    for (DataNodeProperties dnp : stoppedDns) {
+      cluster.restartDataNode(dnp, true);
+      LOG.info("Restarts stopped datanode:{} to trigger block reconstruction",
+          dnp.datanode);
+    }
+    cluster.waitActive();
+
+    LOG.info("Waiting to finish decommissioning node:{}", decommisionNodes);
+    decomTh.join(20000); // waiting 20secs to finish decommission
+    LOG.info("Finished decommissioning node:{}", decommisionNodes);
+
+    assertEquals(deadDecomissioned, fsn.getNumDecomDeadDataNodes());
+    assertEquals(liveDecomissioned + decommisionNodes.size(),
+        fsn.getNumDecomLiveDataNodes());
+
+    // Ensure decommissioned datanode is not automatically shutdown
+    DFSClient client = getDfsClient(cluster.getNameNode(0), conf);
+    assertEquals("All datanodes must be alive", numDNs,
+        client.datanodeReport(DatanodeReportType.LIVE).length);
+
+    assertNull(checkFile(dfs, ecFile, 9, decommisionNodes, numDNs));
+    StripedFileTestUtil.checkData(dfs, ecFile, writeBytes, decommisionNodes,
+        null);
+    cleanupFile(dfs, ecFile);
+  }
+
+  private void testDecommission(int writeBytes, int storageCount,
+      int decomNodeCount, String filename) throws IOException, Exception {
+    Path ecFile = new Path(ecDir, filename);
+    writeStripedFile(dfs, ecFile, writeBytes);
+    List<DatanodeInfo> decommisionNodes = getDecommissionDatanode(dfs, ecFile,
+        writeBytes, decomNodeCount);
+
+    int deadDecomissioned = fsn.getNumDecomDeadDataNodes();
+    int liveDecomissioned = fsn.getNumDecomLiveDataNodes();
+    ((HdfsDataInputStream) dfs.open(ecFile)).getAllBlocks();
+    // Decommission node. Verify that node is decommissioned.
+    decommissionNode(0, decommisionNodes, AdminStates.DECOMMISSIONED);
+
+    assertEquals(deadDecomissioned, fsn.getNumDecomDeadDataNodes());
+    assertEquals(liveDecomissioned + decommisionNodes.size(),
+        fsn.getNumDecomLiveDataNodes());
+
+    // Ensure decommissioned datanode is not automatically shutdown
+    DFSClient client = getDfsClient(cluster.getNameNode(0), conf);
+    assertEquals("All datanodes must be alive", numDNs,
+        client.datanodeReport(DatanodeReportType.LIVE).length);
+
+    assertNull(checkFile(dfs, ecFile, storageCount, decommisionNodes, numDNs));
+    StripedFileTestUtil.checkData(dfs, ecFile, writeBytes, decommisionNodes,
+        null);
+    cleanupFile(dfs, ecFile);
+  }
+
+  private List<DatanodeInfo> getDecommissionDatanode(DistributedFileSystem dfs,
+      Path ecFile, int writeBytes, int decomNodeCount) throws IOException {
+    ArrayList<DatanodeInfo> decommissionedNodes = new ArrayList<>();
+    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
+    BlockLocation[] fileBlockLocations = dfs.getFileBlockLocations(ecFile, 0,
+        writeBytes);
+    for (String dnName : fileBlockLocations[0].getNames()) {
+      for (DatanodeInfo dn : info) {
+        if (dnName.equals(dn.getXferAddr())) {
+          decommissionedNodes.add(dn);
+        }
+        if (decommissionedNodes.size() >= decomNodeCount) {
+          return decommissionedNodes;
+        }
+      }
+    }
+    return decommissionedNodes;
+  }
+
+  /* Get DFSClient to the namenode */
+  private static DFSClient getDfsClient(NameNode nn, Configuration conf)
+      throws IOException {
+    return new DFSClient(nn.getNameNodeAddress(), conf);
+  }
+
+  private void writeStripedFile(DistributedFileSystem dfs, Path ecFile,
+      int writeBytes) throws IOException, Exception {
+    byte[] bytes = StripedFileTestUtil.generateBytes(writeBytes);
+    DFSTestUtil.writeFile(dfs, ecFile, new String(bytes));
+    StripedFileTestUtil.waitBlockGroupsReported(dfs, ecFile.toString());
+
+    StripedFileTestUtil.checkData(dfs, ecFile, writeBytes,
+        new ArrayList<DatanodeInfo>(), null);
+  }
+
+  private void writeConfigFile(Path name, List<String> nodes)
+      throws IOException {
+    // delete if it already exists
+    if (localFileSys.exists(name)) {
+      localFileSys.delete(name, true);
+    }
+
+    FSDataOutputStream stm = localFileSys.create(name);
+
+    if (nodes != null) {
+      for (Iterator<String> it = nodes.iterator(); it.hasNext();) {
+        String node = it.next();
+        stm.writeBytes(node);
+        stm.writeBytes("\n");
+      }
+    }
+    stm.close();
+  }
+
+  private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
+    assertTrue(fileSys.exists(name));
+    fileSys.delete(name, true);
+    assertTrue(!fileSys.exists(name));
+  }
+
+  /*
+   * decommission the DN at index dnIndex or one random node if dnIndex is set
+   * to -1 and wait for the node to reach the given {@code waitForState}.
+   */
+  private void decommissionNode(int nnIndex,
+      List<DatanodeInfo> decommissionedNodes, AdminStates waitForState)
+          throws IOException {
+    DFSClient client = getDfsClient(cluster.getNameNode(nnIndex), conf);
+    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
+
+    // write nodename into the exclude file.
+    ArrayList<String> excludeNodes = new ArrayList<String>();
+    for (DatanodeInfo dn : decommissionedNodes) {
+      boolean nodeExists = false;
+      for (DatanodeInfo dninfo : info) {
+        if (dninfo.getDatanodeUuid().equals(dn.getDatanodeUuid())) {
+          nodeExists = true;
+          break;
+        }
+      }
+      assertTrue("Datanode: " + dn + " is not LIVE", nodeExists);
+      excludeNodes.add(dn.getName());
+      LOG.info("Decommissioning node: " + dn.getName());
+    }
+    writeConfigFile(excludeFile, excludeNodes);
+    refreshNodes(cluster.getNamesystem(nnIndex), conf);
+    for (DatanodeInfo dn : decommissionedNodes) {
+      DatanodeInfo ret = NameNodeAdapter
+          .getDatanode(cluster.getNamesystem(nnIndex), dn);
+      waitNodeState(ret, waitForState);
+    }
+  }
+
+  private static void refreshNodes(final FSNamesystem ns,
+      final Configuration conf) throws IOException {
+    ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
+  }
+
+  /*
+   * Wait till node is fully decommissioned.
+   */
+  private void waitNodeState(DatanodeInfo node, AdminStates state) {
+    boolean done = state == node.getAdminState();
+    while (!done) {
+      LOG.info("Waiting for node " + node + " to change state to " + state
+          + " current state: " + node.getAdminState());
+      try {
+        Thread.sleep(HEARTBEAT_INTERVAL * 500);
+      } catch (InterruptedException e) {
+        // nothing
+      }
+      done = state == node.getAdminState();
+    }
+    LOG.info("node " + node + " reached the state " + state);
+  }
+
+  /**
+   * Verify that the number of replicas are as expected for each block in the
+   * given file. For blocks with a decommissioned node, verify that their
+   * replication is 1 more than what is specified. For blocks without
+   * decommissioned nodes, verify their replication is equal to what is
+   * specified.
+   *
+   * @param downnode
+   *          - if null, there is no decommissioned node for this file.
+   * @return - null if no failure found, else an error message string.
+   */
+  private static String checkFile(FileSystem fileSys, Path name, int repl,
+      List<DatanodeInfo> decommissionedNodes, int numDatanodes)
+          throws IOException {
+    boolean isNodeDown = decommissionedNodes.size() > 0;
+    // need a raw stream
+    assertTrue("Not HDFS:" + fileSys.getUri(),
+        fileSys instanceof DistributedFileSystem);
+    HdfsDataInputStream dis = (HdfsDataInputStream) fileSys.open(name);
+    Collection<LocatedBlock> dinfo = dis.getAllBlocks();
+    for (LocatedBlock blk : dinfo) { // for each block
+      int hasdown = 0;
+      DatanodeInfo[] nodes = blk.getLocations();
+      for (int j = 0; j < nodes.length; j++) { // for each replica
+        LOG.info("Block Locations size={}, locs={}, j=", nodes.length,
+            nodes[j].toString(), j);
+        boolean found = false;
+        for (DatanodeInfo datanodeInfo : decommissionedNodes) {
+          // check against decommissioned list
+          if (isNodeDown
+              && nodes[j].getXferAddr().equals(datanodeInfo.getXferAddr())) {
+            found = true;
+            hasdown++;
+            // Downnode must actually be decommissioned
+            if (!nodes[j].isDecommissioned()) {
+              return "For block " + blk.getBlock() + " replica on " + nodes[j]
+                  + " is given as downnode, " + "but is not decommissioned";
+            }
+            // TODO: Add check to verify that the Decommissioned node (if any)
+            // should only be last node in list.
+            LOG.info("Block " + blk.getBlock() + " replica on " + nodes[j]
+                + " is decommissioned.");
+          }
+        }
+        // Non-downnodes must not be decommissioned
+        if (!found && nodes[j].isDecommissioned()) {
+          return "For block " + blk.getBlock() + " replica on " + nodes[j]
+              + " is unexpectedly decommissioned";
+        }
+      }
+
+      LOG.info("Block " + blk.getBlock() + " has " + hasdown
+          + " decommissioned replica.");
+      if (Math.min(numDatanodes, repl + hasdown) != nodes.length) {
+        return "Wrong number of replicas for block " + blk.getBlock() + ": "
+            + nodes.length + ", expected "
+            + Math.min(numDatanodes, repl + hasdown);
+      }
+    }
+    return null;
+  }
+}
\ No newline at end of file


[25/34] hadoop git commit: HADOOP-12798. Update changelog and release notes (2016-03-04) (aw)

Posted by ar...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.1/CHANGES.2.6.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.1/CHANGES.2.6.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.1/CHANGES.2.6.1.md
new file mode 100644
index 0000000..c5820aa
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.1/CHANGES.2.6.1.md
@@ -0,0 +1,228 @@
+
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+# Apache Hadoop Changelog
+
+## Release 2.6.1 - 2015-09-23
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
+### NEW FEATURES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-12280](https://issues.apache.org/jira/browse/HADOOP-12280) | Skip unit tests based on maven profile rather than NativeCodeLoader.isNativeCodeLoaded |  Minor | test | Masatake Iwasaki | Masatake Iwasaki |
+| [HADOOP-11812](https://issues.apache.org/jira/browse/HADOOP-11812) | Implement listLocatedStatus for ViewFileSystem to speed up split calculation |  Blocker | fs | Gera Shegalov | Gera Shegalov |
+| [HADOOP-11506](https://issues.apache.org/jira/browse/HADOOP-11506) | Configuration variable expansion regex expensive for long values |  Major | conf | Dmitriy V. Ryaboy | Gera Shegalov |
+| [HADOOP-11466](https://issues.apache.org/jira/browse/HADOOP-11466) | FastByteComparisons: do not use UNSAFE\_COMPARER on the SPARC architecture because it is slower there |  Minor | io, performance, util | Suman Somasundar | Suman Somasundar |
+| [HADOOP-7139](https://issues.apache.org/jira/browse/HADOOP-7139) | Allow appending to existing SequenceFiles |  Major | io | Stephen Rose | Kanaka Kumar Avvaru |
+| [HDFS-8384](https://issues.apache.org/jira/browse/HDFS-8384) | Allow NN to startup if there are files having a lease but are not under construction |  Minor | namenode | Tsz Wo Nicholas Sze | Jing Zhao |
+| [HDFS-7579](https://issues.apache.org/jira/browse/HDFS-7579) | Improve log reporting during block report rpc failure |  Minor | datanode | Charles Lamb | Charles Lamb |
+| [HDFS-7531](https://issues.apache.org/jira/browse/HDFS-7531) | Improve the concurrent access on FsVolumeList |  Major | datanode | Lei (Eddy) Xu | Lei (Eddy) Xu |
+| [HDFS-7446](https://issues.apache.org/jira/browse/HDFS-7446) | HDFS inotify should have the ability to determine what txid it has read up to |  Major | hdfs-client | Colin Patrick McCabe | Colin Patrick McCabe |
+| [HDFS-7314](https://issues.apache.org/jira/browse/HDFS-7314) | When the DFSClient lease cannot be renewed, abort open-for-write files rather than the entire DFSClient |  Major | . | Ming Ma | Ming Ma |
+| [HDFS-7278](https://issues.apache.org/jira/browse/HDFS-7278) | Add a command that allows sysadmins to manually trigger full block reports from a DN |  Major | datanode | Colin Patrick McCabe | Colin Patrick McCabe |
+| [HDFS-7182](https://issues.apache.org/jira/browse/HDFS-7182) | JMX metrics aren't accessible when NN is busy |  Major | . | Ming Ma | Ming Ma |
+| [MAPREDUCE-6267](https://issues.apache.org/jira/browse/MAPREDUCE-6267) | Refactor JobSubmitter#copyAndConfigureFiles into it's own class |  Minor | . | Chris Trezzo | Chris Trezzo |
+| [YARN-3978](https://issues.apache.org/jira/browse/YARN-3978) | Configurably turn off the saving of container info in Generic AHS |  Major | timelineserver, yarn | Eric Payne | Eric Payne |
+| [YARN-3249](https://issues.apache.org/jira/browse/YARN-3249) | Add a "kill application" button to Resource Manager's Web UI |  Minor | resourcemanager | Ryu Kobayashi | Ryu Kobayashi |
+| [YARN-3248](https://issues.apache.org/jira/browse/YARN-3248) | Display count of nodes blacklisted by apps in the web UI |  Major | capacityscheduler, resourcemanager | Varun Vasudev | Varun Vasudev |
+| [YARN-3230](https://issues.apache.org/jira/browse/YARN-3230) | Clarify application states on the web UI |  Major | . | Jian He | Jian He |
+| [YARN-2301](https://issues.apache.org/jira/browse/YARN-2301) | Improve yarn container command |  Major | . | Jian He | Naganarasimha G R |
+
+
+### BUG FIXES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-11934](https://issues.apache.org/jira/browse/HADOOP-11934) | Use of JavaKeyStoreProvider in LdapGroupsMapping causes infinite loop |  Blocker | security | Mike Yoder | Larry McCay |
+| [HADOOP-11932](https://issues.apache.org/jira/browse/HADOOP-11932) |  MetricsSinkAdapter hangs when being stopped |  Critical | . | Jian He | Brahma Reddy Battula |
+| [HADOOP-11802](https://issues.apache.org/jira/browse/HADOOP-11802) | DomainSocketWatcher thread terminates sometimes after there is an I/O error during requestShortCircuitShm |  Major | . | Eric Payne | Colin Patrick McCabe |
+| [HADOOP-11730](https://issues.apache.org/jira/browse/HADOOP-11730) | Regression: s3n read failure recovery broken |  Major | fs/s3 | Takenori Sato | Takenori Sato |
+| [HADOOP-11674](https://issues.apache.org/jira/browse/HADOOP-11674) | oneByteBuf in CryptoInputStream and CryptoOutputStream should be non static |  Critical | io | Sean Busbey | Sean Busbey |
+| [HADOOP-11604](https://issues.apache.org/jira/browse/HADOOP-11604) | Prevent ConcurrentModificationException while closing domain sockets during shutdown of DomainSocketWatcher thread. |  Critical | net | Liang Xie | Chris Nauroth |
+| [HADOOP-11491](https://issues.apache.org/jira/browse/HADOOP-11491) | HarFs incorrectly declared as requiring an authority |  Critical | fs | Gera Shegalov | Brahma Reddy Battula |
+| [HADOOP-11482](https://issues.apache.org/jira/browse/HADOOP-11482) | Use correct UGI when KMSClientProvider is called by a proxy user |  Major | . | Arun Suresh | Arun Suresh |
+| [HADOOP-11368](https://issues.apache.org/jira/browse/HADOOP-11368) | Fix SSLFactory truststore reloader thread leak in KMSClientProvider |  Major | kms | Arun Suresh | Arun Suresh |
+| [HADOOP-11350](https://issues.apache.org/jira/browse/HADOOP-11350) | The size of header buffer of HttpServer is too small when HTTPS is enabled |  Major | security | Benoy Antony | Benoy Antony |
+| [HADOOP-11343](https://issues.apache.org/jira/browse/HADOOP-11343) | Overflow is not properly handled in calculating final iv for AES CTR |  Blocker | security | Jerry Chen | Jerry Chen |
+| [HADOOP-11333](https://issues.apache.org/jira/browse/HADOOP-11333) | Fix deadlock in DomainSocketWatcher when the notification pipe is full |  Major | . | zhaoyunjiong | zhaoyunjiong |
+| [HADOOP-11316](https://issues.apache.org/jira/browse/HADOOP-11316) | "mvn package -Pdist,docs -DskipTests -Dtar" fails because of non-ascii characters |  Blocker | . | Tsuyoshi Ozawa | Tsuyoshi Ozawa |
+| [HADOOP-11295](https://issues.apache.org/jira/browse/HADOOP-11295) | RPC Server Reader thread can't shutdown if RPCCallQueue is full |  Major | . | Ming Ma | Ming Ma |
+| [HADOOP-11238](https://issues.apache.org/jira/browse/HADOOP-11238) | Update the NameNode's Group Cache in the background when possible |  Minor | . | Chris Li | Chris Li |
+| [HADOOP-10786](https://issues.apache.org/jira/browse/HADOOP-10786) | Fix UGI#reloginFromKeytab on Java 8 |  Major | security | Tobi Vollebregt | Stephen Chu |
+| [HADOOP-8151](https://issues.apache.org/jira/browse/HADOOP-8151) | Error handling in snappy decompressor throws invalid exceptions |  Major | io, native | Todd Lipcon | Matt Foley |
+| [HDFS-8863](https://issues.apache.org/jira/browse/HDFS-8863) | The remaining space check in BlockPlacementPolicyDefault is flawed |  Critical | . | Kihwal Lee | Kihwal Lee |
+| [HDFS-8846](https://issues.apache.org/jira/browse/HDFS-8846) | Add a unit test for INotify functionality across a layout version upgrade |  Major | namenode | Zhe Zhang | Zhe Zhang |
+| [HDFS-8486](https://issues.apache.org/jira/browse/HDFS-8486) | DN startup may cause severe data loss |  Blocker | datanode | Daryn Sharp | Daryn Sharp |
+| [HDFS-8480](https://issues.apache.org/jira/browse/HDFS-8480) | Fix performance and timeout issues in HDFS-7929 by using hard-links to preserve old edit logs instead of copying them |  Critical | . | Zhe Zhang | Zhe Zhang |
+| [HDFS-8431](https://issues.apache.org/jira/browse/HDFS-8431) | hdfs crypto class not found in Windows |  Critical | scripts | Sumana Sathish | Anu Engineer |
+| [HDFS-8404](https://issues.apache.org/jira/browse/HDFS-8404) | Pending block replication can get stuck using older genstamp |  Major | namenode | Nathan Roberts | Nathan Roberts |
+| [HDFS-8270](https://issues.apache.org/jira/browse/HDFS-8270) | create() always retried with hardcoded timeout when file already exists with open lease |  Major | hdfs-client | Andrey Stepachev | J.Andreina |
+| [HDFS-8245](https://issues.apache.org/jira/browse/HDFS-8245) | Standby namenode doesn't process DELETED\_BLOCK if the add block request is in edit log. |  Major | . | Rushabh S Shah | Rushabh S Shah |
+| [HDFS-8219](https://issues.apache.org/jira/browse/HDFS-8219) | setStoragePolicy with folder behavior is different after cluster restart |  Major | . | Peter Shi | Surendra Singh Lilhore |
+| [HDFS-8127](https://issues.apache.org/jira/browse/HDFS-8127) | NameNode Failover during HA upgrade can cause DataNode to finalize upgrade |  Blocker | ha | Jing Zhao | Jing Zhao |
+| [HDFS-8072](https://issues.apache.org/jira/browse/HDFS-8072) | Reserved RBW space is not released if client terminates while writing block |  Major | datanode | Arpit Agarwal | Arpit Agarwal |
+| [HDFS-8070](https://issues.apache.org/jira/browse/HDFS-8070) | Pre-HDFS-7915 DFSClient cannot use short circuit on post-HDFS-7915 DataNode |  Blocker | caching | Gopal V | Colin Patrick McCabe |
+| [HDFS-8046](https://issues.apache.org/jira/browse/HDFS-8046) | Allow better control of getContentSummary |  Major | . | Kihwal Lee | Kihwal Lee |
+| [HDFS-7999](https://issues.apache.org/jira/browse/HDFS-7999) | FsDatasetImpl#createTemporary sometimes holds the FSDatasetImpl lock for a very long time |  Major | . | zhouyingchao | zhouyingchao |
+| [HDFS-7980](https://issues.apache.org/jira/browse/HDFS-7980) | Incremental BlockReport will dramatically slow down the startup of  a namenode |  Major | . | Hui Zheng | Walter Su |
+| [HDFS-7960](https://issues.apache.org/jira/browse/HDFS-7960) | The full block report should prune zombie storages even if they're not empty |  Critical | . | Lei (Eddy) Xu | Colin Patrick McCabe |
+| [HDFS-7930](https://issues.apache.org/jira/browse/HDFS-7930) | commitBlockSynchronization() does not remove locations |  Blocker | namenode | Konstantin Shvachko | Yi Liu |
+| [HDFS-7929](https://issues.apache.org/jira/browse/HDFS-7929) | inotify unable fetch pre-upgrade edit log segments once upgrade starts |  Major | . | Zhe Zhang | Zhe Zhang |
+| [HDFS-7915](https://issues.apache.org/jira/browse/HDFS-7915) | The DataNode can sometimes allocate a ShortCircuitShm slot and fail to tell the DFSClient about it because of a network error |  Major | . | Colin Patrick McCabe | Colin Patrick McCabe |
+| [HDFS-7894](https://issues.apache.org/jira/browse/HDFS-7894) | Rolling upgrade readiness is not updated in jmx until query command is issued. |  Critical | . | Kihwal Lee | Brahma Reddy Battula |
+| [HDFS-7885](https://issues.apache.org/jira/browse/HDFS-7885) | Datanode should not trust the generation stamp provided by client |  Critical | datanode | vitthal (Suhas) Gogate | Tsz Wo Nicholas Sze |
+| [HDFS-7884](https://issues.apache.org/jira/browse/HDFS-7884) | NullPointerException in BlockSender |  Blocker | datanode | Tsz Wo Nicholas Sze | Brahma Reddy Battula |
+| [HDFS-7871](https://issues.apache.org/jira/browse/HDFS-7871) | NameNodeEditLogRoller can keep printing "Swallowing exception" message |  Critical | . | Jing Zhao | Jing Zhao |
+| [HDFS-7830](https://issues.apache.org/jira/browse/HDFS-7830) | DataNode does not release the volume lock when adding a volume fails. |  Major | datanode | Lei (Eddy) Xu | Lei (Eddy) Xu |
+| [HDFS-7788](https://issues.apache.org/jira/browse/HDFS-7788) | Post-2.6 namenode may not start up with an image containing inodes created with an old release. |  Blocker | . | Kihwal Lee | Rushabh S Shah |
+| [HDFS-7763](https://issues.apache.org/jira/browse/HDFS-7763) | fix zkfc hung issue due to not catching exception in a corner case |  Major | ha | Liang Xie | Liang Xie |
+| [HDFS-7742](https://issues.apache.org/jira/browse/HDFS-7742) | favoring decommissioning node for replication can cause a block to stay underreplicated for long periods |  Major | namenode | Nathan Roberts | Nathan Roberts |
+| [HDFS-7733](https://issues.apache.org/jira/browse/HDFS-7733) | NFS: readdir/readdirplus return null directory attribute on failure |  Major | nfs | Arpit Agarwal | Arpit Agarwal |
+| [HDFS-7714](https://issues.apache.org/jira/browse/HDFS-7714) | Simultaneous restart of HA NameNodes and DataNode can cause DataNode to register successfully with only one NameNode. |  Major | datanode | Chris Nauroth | Vinayakumar B |
+| [HDFS-7707](https://issues.apache.org/jira/browse/HDFS-7707) | Edit log corruption due to delayed block removal again |  Major | namenode | Yongjun Zhang | Yongjun Zhang |
+| [HDFS-7610](https://issues.apache.org/jira/browse/HDFS-7610) | Fix removal of dynamically added DN volumes |  Major | datanode | Lei (Eddy) Xu | Lei (Eddy) Xu |
+| [HDFS-7609](https://issues.apache.org/jira/browse/HDFS-7609) | Avoid retry cache collision when Standby NameNode loading edits |  Critical | namenode | Carrey Zhan | Ming Ma |
+| [HDFS-7596](https://issues.apache.org/jira/browse/HDFS-7596) | NameNode should prune dead storages from storageMap |  Major | namenode | Arpit Agarwal | Arpit Agarwal |
+| [HDFS-7587](https://issues.apache.org/jira/browse/HDFS-7587) | Edit log corruption can happen if append fails with a quota violation |  Blocker | namenode | Kihwal Lee | Jing Zhao |
+| [HDFS-7575](https://issues.apache.org/jira/browse/HDFS-7575) | Upgrade should generate a unique storage ID for each volume |  Critical | . | Lars Francke | Arpit Agarwal |
+| [HDFS-7552](https://issues.apache.org/jira/browse/HDFS-7552) | change FsVolumeList toString() to fix TestDataNodeVolumeFailureToleration |  Major | datanode, test | Liang Xie | Liang Xie |
+| [HDFS-7533](https://issues.apache.org/jira/browse/HDFS-7533) | Datanode sometimes does not shutdown on receiving upgrade shutdown command |  Major | . | Kihwal Lee | Eric Payne |
+| [HDFS-7503](https://issues.apache.org/jira/browse/HDFS-7503) | Namenode restart after large deletions can cause slow processReport (due to logging) |  Major | namenode | Arpit Agarwal | Arpit Agarwal |
+| [HDFS-7489](https://issues.apache.org/jira/browse/HDFS-7489) | Incorrect locking in FsVolumeList#checkDirs can hang datanodes |  Critical | datanode | Noah Lorang | Noah Lorang |
+| [HDFS-7470](https://issues.apache.org/jira/browse/HDFS-7470) | SecondaryNameNode need twice memory when calling reloadFromImageFile |  Major | namenode | zhaoyunjiong | zhaoyunjiong |
+| [HDFS-7443](https://issues.apache.org/jira/browse/HDFS-7443) | Datanode upgrade to BLOCKID\_BASED\_LAYOUT fails if duplicate block files are present in the same volume |  Blocker | . | Kihwal Lee | Colin Patrick McCabe |
+| [HDFS-7425](https://issues.apache.org/jira/browse/HDFS-7425) | NameNode block deletion logging uses incorrect appender. |  Minor | namenode | Chris Nauroth | Chris Nauroth |
+| [HDFS-7263](https://issues.apache.org/jira/browse/HDFS-7263) | Snapshot read can reveal future bytes for appended files. |  Major | hdfs-client | Konstantin Shvachko | Tao Luo |
+| [HDFS-7235](https://issues.apache.org/jira/browse/HDFS-7235) | DataNode#transferBlock should report blocks that don't exist using reportBadBlock |  Major | datanode, namenode | Yongjun Zhang | Yongjun Zhang |
+| [HDFS-7225](https://issues.apache.org/jira/browse/HDFS-7225) | Remove stale block invalidation work when DN re-registers with different UUID |  Major | namenode | Zhe Zhang | Zhe Zhang |
+| [HDFS-7213](https://issues.apache.org/jira/browse/HDFS-7213) | processIncrementalBlockReport performance degradation |  Critical | namenode | Daryn Sharp | Eric Payne |
+| [HDFS-7009](https://issues.apache.org/jira/browse/HDFS-7009) | Active NN and standby NN have different live nodes |  Major | datanode | Ming Ma | Ming Ma |
+| [HDFS-4882](https://issues.apache.org/jira/browse/HDFS-4882) | Prevent the Namenode's LeaseManager from looping forever in checkLeases |  Critical | hdfs-client, namenode | Zesheng Wu | Ravi Prakash |
+| [HDFS-3443](https://issues.apache.org/jira/browse/HDFS-3443) | Fix NPE when namenode transition to active during startup by adding checkNNStartup() in NameNodeRpcServer |  Major | auto-failover, ha | suja s | Vinayakumar B |
+| [MAPREDUCE-6361](https://issues.apache.org/jira/browse/MAPREDUCE-6361) | NPE issue in shuffle caused by concurrent issue between copySucceeded() in one thread and copyFailed() in another thread on the same host |  Critical | . | Junping Du | Junping Du |
+| [MAPREDUCE-6324](https://issues.apache.org/jira/browse/MAPREDUCE-6324) | Uber jobs fail to update AMRM token when it rolls over |  Blocker | mr-am | Jason Lowe | Jason Lowe |
+| [MAPREDUCE-6303](https://issues.apache.org/jira/browse/MAPREDUCE-6303) | Read timeout when retrying a fetch error can be fatal to a reducer |  Blocker | . | Jason Lowe | Jason Lowe |
+| [MAPREDUCE-6300](https://issues.apache.org/jira/browse/MAPREDUCE-6300) | Task list sort by task id broken |  Minor | . | Siqi Li | Siqi Li |
+| [MAPREDUCE-6238](https://issues.apache.org/jira/browse/MAPREDUCE-6238) | MR2 can't run local jobs with -libjars command options which is a regression from MR1 |  Critical | mrv2 | zhihai xu | zhihai xu |
+| [MAPREDUCE-6237](https://issues.apache.org/jira/browse/MAPREDUCE-6237) | Multiple mappers with DBInputFormat don't work because of reusing connections |  Major | mrv2 | Kannan Rajah | Kannan Rajah |
+| [MAPREDUCE-6230](https://issues.apache.org/jira/browse/MAPREDUCE-6230) | MR AM does not survive RM restart if RM activated a new AMRM secret key |  Blocker | mr-am | Jason Lowe | Jason Lowe |
+| [MAPREDUCE-6166](https://issues.apache.org/jira/browse/MAPREDUCE-6166) | Reducers do not validate checksum of map outputs when fetching directly to disk |  Major | mrv2 | Eric Payne | Eric Payne |
+| [MAPREDUCE-5649](https://issues.apache.org/jira/browse/MAPREDUCE-5649) | Reduce cannot use more than 2G memory  for the final merge |  Major | mrv2 | stanley shi | Gera Shegalov |
+| [YARN-4047](https://issues.apache.org/jira/browse/YARN-4047) | ClientRMService getApplications has high scheduler lock contention |  Major | resourcemanager | Jason Lowe | Jason Lowe |
+| [YARN-3999](https://issues.apache.org/jira/browse/YARN-3999) | RM hangs on draining events |  Major | . | Jian He | Jian He |
+| [YARN-3990](https://issues.apache.org/jira/browse/YARN-3990) | AsyncDispatcher may overloaded with RMAppNodeUpdateEvent when Node is connected/disconnected |  Critical | resourcemanager | Rohith Sharma K S | Bibin A Chundatt |
+| [YARN-3850](https://issues.apache.org/jira/browse/YARN-3850) | NM fails to read files from full disks which can lead to container logs being lost and other issues |  Blocker | log-aggregation, nodemanager | Varun Saxena | Varun Saxena |
+| [YARN-3832](https://issues.apache.org/jira/browse/YARN-3832) | Resource Localization fails on a cluster due to existing cache directories |  Critical | nodemanager | Ranga Swamy | Brahma Reddy Battula |
+| [YARN-3733](https://issues.apache.org/jira/browse/YARN-3733) | Fix DominantRC#compare() does not work as expected if cluster resource is empty |  Blocker | resourcemanager | Bibin A Chundatt | Rohith Sharma K S |
+| [YARN-3725](https://issues.apache.org/jira/browse/YARN-3725) | App submission via REST API is broken in secure mode due to Timeline DT service address is empty |  Blocker | resourcemanager, timelineserver | Zhijie Shen | Zhijie Shen |
+| [YARN-3585](https://issues.apache.org/jira/browse/YARN-3585) | NodeManager cannot exit on SHUTDOWN event triggered and NM recovery is enabled |  Critical | . | Peng Zhang | Rohith Sharma K S |
+| [YARN-3526](https://issues.apache.org/jira/browse/YARN-3526) | ApplicationMaster tracking URL is incorrectly redirected on a QJM cluster |  Major | resourcemanager, webapp | Weiwei Yang | Weiwei Yang |
+| [YARN-3493](https://issues.apache.org/jira/browse/YARN-3493) | RM fails to come up with error "Failed to load/recover state" when  mem settings are changed |  Critical | yarn | Sumana Sathish | Jian He |
+| [YARN-3464](https://issues.apache.org/jira/browse/YARN-3464) | Race condition in LocalizerRunner kills localizer before localizing all resources |  Critical | nodemanager | zhihai xu | zhihai xu |
+| [YARN-3393](https://issues.apache.org/jira/browse/YARN-3393) | Getting application(s) goes wrong when app finishes before starting the attempt |  Critical | timelineserver | Zhijie Shen | Zhijie Shen |
+| [YARN-3369](https://issues.apache.org/jira/browse/YARN-3369) | Missing NullPointer check in AppSchedulingInfo causes RM to die |  Blocker | resourcemanager | Giovanni Matteo Fumarola | Brahma Reddy Battula |
+| [YARN-3287](https://issues.apache.org/jira/browse/YARN-3287) | TimelineClient kerberos authentication failure uses wrong login context. |  Major | . | Jonathan Eagles | Daryn Sharp |
+| [YARN-3267](https://issues.apache.org/jira/browse/YARN-3267) | Timelineserver applies the ACL rules after applying the limit on the number of records |  Major | . | Prakash Ramachandran | Chang Li |
+| [YARN-3251](https://issues.apache.org/jira/browse/YARN-3251) | Fix CapacityScheduler deadlock when computing absolute max avail capacity (short term fix for 2.6.1) |  Blocker | . | Jason Lowe | Craig Welch |
+| [YARN-3242](https://issues.apache.org/jira/browse/YARN-3242) | Asynchrony in ZK-close can lead to ZKRMStateStore watcher receiving events for old client |  Critical | resourcemanager | zhihai xu | zhihai xu |
+| [YARN-3239](https://issues.apache.org/jira/browse/YARN-3239) | WebAppProxy does not support a final tracking url which has query fragments and params |  Major | . | Hitesh Shah | Jian He |
+| [YARN-3238](https://issues.apache.org/jira/browse/YARN-3238) | Connection timeouts to nodemanagers are retried at multiple levels |  Blocker | . | Jason Lowe | Jason Lowe |
+| [YARN-3231](https://issues.apache.org/jira/browse/YARN-3231) | FairScheduler: Changing queueMaxRunningApps interferes with pending jobs |  Critical | . | Siqi Li | Siqi Li |
+| [YARN-3227](https://issues.apache.org/jira/browse/YARN-3227) | Timeline renew delegation token fails when RM user's TGT is expired |  Critical | . | Jonathan Eagles | Zhijie Shen |
+| [YARN-3222](https://issues.apache.org/jira/browse/YARN-3222) | RMNodeImpl#ReconnectNodeTransition should send scheduler events in sequential order |  Critical | resourcemanager | Rohith Sharma K S | Rohith Sharma K S |
+| [YARN-3207](https://issues.apache.org/jira/browse/YARN-3207) | secondary filter matches entities which do not have the key being filtered for. |  Major | timelineserver | Prakash Ramachandran | Zhijie Shen |
+| [YARN-3103](https://issues.apache.org/jira/browse/YARN-3103) | AMRMClientImpl does not update AMRM token properly |  Blocker | client | Jason Lowe | Jason Lowe |
+| [YARN-3094](https://issues.apache.org/jira/browse/YARN-3094) | reset timer for liveness monitors after RM recovery |  Major | resourcemanager | Jun Gong | Jun Gong |
+| [YARN-3055](https://issues.apache.org/jira/browse/YARN-3055) | The token is not renewed properly if it's shared by jobs (oozie) in DelegationTokenRenewer |  Blocker | security | Yi Liu | Daryn Sharp |
+| [YARN-3024](https://issues.apache.org/jira/browse/YARN-3024) | LocalizerRunner should give DIE action when all resources are localized |  Major | nodemanager | Chengbing Liu | Chengbing Liu |
+| [YARN-2997](https://issues.apache.org/jira/browse/YARN-2997) | NM keeps sending already-sent completed containers to RM until containers are removed from context |  Major | nodemanager | Chengbing Liu | Chengbing Liu |
+| [YARN-2992](https://issues.apache.org/jira/browse/YARN-2992) | ZKRMStateStore crashes due to session expiry |  Blocker | resourcemanager | Karthik Kambatla | Karthik Kambatla |
+| [YARN-2978](https://issues.apache.org/jira/browse/YARN-2978) | ResourceManager crashes with NPE while getting queue info |  Critical | . | Jason Tufo | Varun Saxena |
+| [YARN-2964](https://issues.apache.org/jira/browse/YARN-2964) | RM prematurely cancels tokens for jobs that submit jobs (oozie) |  Blocker | resourcemanager | Daryn Sharp | Jian He |
+| [YARN-2952](https://issues.apache.org/jira/browse/YARN-2952) | Incorrect version check in RMStateStore |  Major | . | Jian He | Rohith Sharma K S |
+| [YARN-2922](https://issues.apache.org/jira/browse/YARN-2922) | ConcurrentModificationException in CapacityScheduler's LeafQueue |  Major | capacityscheduler, resourcemanager, scheduler | Jason Tufo | Rohith Sharma K S |
+| [YARN-2917](https://issues.apache.org/jira/browse/YARN-2917) | Potential deadlock in AsyncDispatcher when system.exit called in AsyncDispatcher#dispatch and AsyscDispatcher#serviceStop from shutdown hook |  Critical | resourcemanager | Rohith Sharma K S | Rohith Sharma K S |
+| [YARN-2910](https://issues.apache.org/jira/browse/YARN-2910) | FSLeafQueue can throw ConcurrentModificationException |  Major | fairscheduler | Wilfred Spiegelenburg | Wilfred Spiegelenburg |
+| [YARN-2906](https://issues.apache.org/jira/browse/YARN-2906) | CapacitySchedulerPage shows HTML tags for a queue's Active Users |  Major | capacityscheduler | Jason Lowe | Jason Lowe |
+| [YARN-2905](https://issues.apache.org/jira/browse/YARN-2905) | AggregatedLogsBlock page can infinitely loop if the aggregated log file is corrupted |  Blocker | . | Jason Lowe | Varun Saxena |
+| [YARN-2894](https://issues.apache.org/jira/browse/YARN-2894) | When ACL's are enabled, if RM switches then application can not be viewed from web. |  Major | resourcemanager | Rohith Sharma K S | Rohith Sharma K S |
+| [YARN-2890](https://issues.apache.org/jira/browse/YARN-2890) | MiniYarnCluster should turn on timeline service if configured to do so |  Major | . | Mit Desai | Mit Desai |
+| [YARN-2874](https://issues.apache.org/jira/browse/YARN-2874) | Dead lock in "DelegationTokenRenewer" which blocks RM to execute any further apps |  Blocker | resourcemanager | Naganarasimha G R | Naganarasimha G R |
+| [YARN-2865](https://issues.apache.org/jira/browse/YARN-2865) | Application recovery continuously fails with "Application with id already present. Cannot duplicate" |  Critical | resourcemanager | Rohith Sharma K S | Rohith Sharma K S |
+| [YARN-2856](https://issues.apache.org/jira/browse/YARN-2856) | Application recovery throw InvalidStateTransitonException: Invalid event: ATTEMPT\_KILLED at ACCEPTED |  Critical | resourcemanager | Rohith Sharma K S | Rohith Sharma K S |
+| [YARN-2816](https://issues.apache.org/jira/browse/YARN-2816) | NM fail to start with NPE during container recovery |  Major | nodemanager | zhihai xu | zhihai xu |
+| [YARN-2637](https://issues.apache.org/jira/browse/YARN-2637) | maximum-am-resource-percent could be respected for both LeafQueue/User when trying to activate applications. |  Critical | resourcemanager | Wangda Tan | Craig Welch |
+| [YARN-2414](https://issues.apache.org/jira/browse/YARN-2414) | RM web UI: app page will crash if app is failed before any attempt has been created |  Major | webapp | Zhijie Shen | Wangda Tan |
+| [YARN-2340](https://issues.apache.org/jira/browse/YARN-2340) | NPE thrown when RM restart after queue is STOPPED. There after RM can not recovery application's and remain in standby |  Critical | resourcemanager, scheduler | Nishan Shetty | Rohith Sharma K S |
+| [YARN-2246](https://issues.apache.org/jira/browse/YARN-2246) | Job History Link in RM UI is redirecting to the URL which contains Job Id twice |  Major | webapp | Devaraj K | Devaraj K |
+
+
+### TESTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
+### SUB-TASKS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-11710](https://issues.apache.org/jira/browse/HADOOP-11710) | Make CryptoOutputStream behave like DFSOutputStream wrt synchronization |  Critical | fs | Sean Busbey | Sean Busbey |
+| [HDFS-7035](https://issues.apache.org/jira/browse/HDFS-7035) | Make adding a new data directory to the DataNode an atomic operation and improve error handling |  Major | datanode | Lei (Eddy) Xu | Lei (Eddy) Xu |
+| [YARN-3740](https://issues.apache.org/jira/browse/YARN-3740) | Fixed the typo with the configuration name: APPLICATION\_HISTORY\_PREFIX\_MAX\_APPS |  Major | resourcemanager, webapp, yarn | Xuan Gong | Xuan Gong |
+| [YARN-3700](https://issues.apache.org/jira/browse/YARN-3700) | ATS Web Performance issue at load time when large number of jobs |  Major | resourcemanager, webapp, yarn | Xuan Gong | Xuan Gong |
+| [YARN-3544](https://issues.apache.org/jira/browse/YARN-3544) | AM logs link missing in the RM UI for a completed app |  Blocker | . | Hitesh Shah | Xuan Gong |
+| [YARN-3487](https://issues.apache.org/jira/browse/YARN-3487) | CapacityScheduler scheduler lock obtained unnecessarily when calling getQueue |  Critical | capacityscheduler | Jason Lowe | Jason Lowe |
+| [YARN-3171](https://issues.apache.org/jira/browse/YARN-3171) | Sort by Application id, AppAttempt & ContainerID doesn't work in ATS / RM web ui |  Minor | timelineserver | Jeff Zhang | Naganarasimha G R |
+| [YARN-3124](https://issues.apache.org/jira/browse/YARN-3124) | Capacity Scheduler LeafQueue/ParentQueue should use QueueCapacities to track capacities-by-label |  Major | api, client, resourcemanager | Wangda Tan | Wangda Tan |
+| [YARN-3099](https://issues.apache.org/jira/browse/YARN-3099) | Capacity Scheduler LeafQueue/ParentQueue should use ResourceUsage to track used-resources-by-label. |  Major | api, client, resourcemanager | Wangda Tan | Wangda Tan |
+| [YARN-3098](https://issues.apache.org/jira/browse/YARN-3098) | Create common QueueCapacities class in Capacity Scheduler to track capacities-by-labels of queues |  Major | capacityscheduler | Wangda Tan | Wangda Tan |
+| [YARN-3092](https://issues.apache.org/jira/browse/YARN-3092) | Create common ResourceUsage class to track labeled resource usages in Capacity Scheduler |  Major | api, client, resourcemanager | Wangda Tan | Wangda Tan |
+| [YARN-3011](https://issues.apache.org/jira/browse/YARN-3011) | NM dies because of the failure of resource localization |  Major | nodemanager | Wang Hao | Varun Saxena |
+| [YARN-2920](https://issues.apache.org/jira/browse/YARN-2920) | CapacityScheduler should be notified when labels on nodes changed |  Major | . | Wangda Tan | Wangda Tan |
+| [YARN-2918](https://issues.apache.org/jira/browse/YARN-2918) | Don't fail RM if queue's configured labels are not existed in cluster-node-labels |  Major | resourcemanager | Rohith Sharma K S | Wangda Tan |
+| [YARN-2900](https://issues.apache.org/jira/browse/YARN-2900) | Application (Attempt and Container) Not Found in AHS results in Internal Server Error (500) |  Major | timelineserver | Jonathan Eagles | Mit Desai |
+| [YARN-2766](https://issues.apache.org/jira/browse/YARN-2766) |  ApplicationHistoryManager is expected to return a sorted list of apps/attempts/containers |  Major | timelineserver | Robert Kanter | Robert Kanter |
+| [YARN-2694](https://issues.apache.org/jira/browse/YARN-2694) | Ensure only single node labels specified in resource request / host, and node label expression only specified when resourceName=ANY |  Major | capacityscheduler, resourcemanager | Wangda Tan | Wangda Tan |
+| [YARN-1984](https://issues.apache.org/jira/browse/YARN-1984) | LeveldbTimelineStore does not handle db exceptions properly |  Major | . | Jason Lowe | Varun Saxena |
+| [YARN-1884](https://issues.apache.org/jira/browse/YARN-1884) | ContainerReport should have nodeHttpAddress |  Major | . | Zhijie Shen | Xuan Gong |
+| [YARN-1809](https://issues.apache.org/jira/browse/YARN-1809) | Synchronize RM and Generic History Service Web-UIs |  Major | . | Zhijie Shen | Xuan Gong |
+
+
+### OTHER:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.1/RELEASENOTES.2.6.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.1/RELEASENOTES.2.6.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.1/RELEASENOTES.2.6.1.md
new file mode 100644
index 0000000..a38ca60
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.1/RELEASENOTES.2.6.1.md
@@ -0,0 +1,45 @@
+
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+# Apache Hadoop  2.6.1 Release Notes
+
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
+
+
+---
+
+* [HADOOP-7139](https://issues.apache.org/jira/browse/HADOOP-7139) | *Major* | **Allow appending to existing SequenceFiles**
+
+Existing sequence files can be appended to.
+
+
+---
+
+* [HDFS-8486](https://issues.apache.org/jira/browse/HDFS-8486) | *Blocker* | **DN startup may cause severe data loss**
+
+<!-- markdown -->
+Public service notice:
+* Every restart of a 2.6.x or 2.7.0 DN incurs a risk of unwanted block deletion.
+* Apply this patch if you are running a pre-2.7.1 release.
+
+
+---
+
+* [HDFS-8270](https://issues.apache.org/jira/browse/HDFS-8270) | *Major* | **create() always retried with hardcoded timeout when file already exists with open lease**
+
+Proxy level retries will not be done on AlreadyBeingCreatedException for create() op.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.2/CHANGES.2.6.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.2/CHANGES.2.6.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.2/CHANGES.2.6.2.md
new file mode 100644
index 0000000..0ebba50
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.2/CHANGES.2.6.2.md
@@ -0,0 +1,83 @@
+
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+# Apache Hadoop Changelog
+
+## Release 2.6.2 - 2015-10-28
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
+### NEW FEATURES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [YARN-3727](https://issues.apache.org/jira/browse/YARN-3727) | For better error recovery, check if the directory exists before using it for localization. |  Major | nodemanager | zhihai xu | zhihai xu |
+
+
+### BUG FIXES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [MAPREDUCE-6497](https://issues.apache.org/jira/browse/MAPREDUCE-6497) | Fix wrong value of JOB\_FINISHED event in JobHistoryEventHandler |  Major | . | Shinichi Yamashita | Shinichi Yamashita |
+| [MAPREDUCE-6454](https://issues.apache.org/jira/browse/MAPREDUCE-6454) | MapReduce doesn't set the HADOOP\_CLASSPATH for jar lib in distributed cache. |  Critical | . | Junping Du | Junping Du |
+| [MAPREDUCE-6334](https://issues.apache.org/jira/browse/MAPREDUCE-6334) | Fetcher#copyMapOutput is leaking usedMemory upon IOException during InMemoryMapOutput shuffle handler |  Blocker | . | Eric Payne | Eric Payne |
+| [YARN-4087](https://issues.apache.org/jira/browse/YARN-4087) | Followup fixes after YARN-2019 regarding RM behavior when state-store error occurs |  Major | . | Jian He | Jian He |
+| [YARN-4005](https://issues.apache.org/jira/browse/YARN-4005) | Completed container whose app is finished is not removed from NMStateStore |  Major | . | Jun Gong | Jun Gong |
+| [YARN-3896](https://issues.apache.org/jira/browse/YARN-3896) | RMNode transitioned from RUNNING to REBOOTED because its response id had not been reset synchronously |  Major | resourcemanager | Jun Gong | Jun Gong |
+| [YARN-3802](https://issues.apache.org/jira/browse/YARN-3802) | Two RMNodes for the same NodeId are used in RM sometimes after NM is reconnected. |  Major | resourcemanager | zhihai xu | zhihai xu |
+| [YARN-3798](https://issues.apache.org/jira/browse/YARN-3798) | ZKRMStateStore shouldn't create new session without occurrance of SESSIONEXPIED |  Blocker | resourcemanager | Bibin A Chundatt | Varun Saxena |
+| [YARN-3780](https://issues.apache.org/jira/browse/YARN-3780) | Should use equals when compare Resource in RMNodeImpl#ReconnectNodeTransition |  Minor | resourcemanager | zhihai xu | zhihai xu |
+| [YARN-3554](https://issues.apache.org/jira/browse/YARN-3554) | Default value for maximum nodemanager connect wait time is too high |  Major | . | Jason Lowe | Naganarasimha G R |
+| [YARN-3194](https://issues.apache.org/jira/browse/YARN-3194) | RM should handle NMContainerStatuses sent by NM while registering if NM is Reconnected node |  Blocker | resourcemanager | Rohith Sharma K S | Rohith Sharma K S |
+| [YARN-2019](https://issues.apache.org/jira/browse/YARN-2019) | Retrospect on decision of making RM crashed if any exception throw in ZKRMStateStore |  Critical | . | Junping Du | Jian He |
+
+
+### TESTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
+### SUB-TASKS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [YARN-4101](https://issues.apache.org/jira/browse/YARN-4101) | RM should print alert messages if Zookeeper and Resourcemanager gets connection issue |  Critical | yarn | Yesha Vora | Xuan Gong |
+| [YARN-4092](https://issues.apache.org/jira/browse/YARN-4092) | RM HA UI redirection needs to be fixed when both RMs are in standby mode |  Major | resourcemanager | Xuan Gong | Xuan Gong |
+
+
+### OTHER:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.2/RELEASENOTES.2.6.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.2/RELEASENOTES.2.6.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.2/RELEASENOTES.2.6.2.md
new file mode 100644
index 0000000..584ffa0
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.2/RELEASENOTES.2.6.2.md
@@ -0,0 +1,21 @@
+
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+# Apache Hadoop  2.6.2 Release Notes
+
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.3/CHANGES.2.6.3.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.3/CHANGES.2.6.3.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.3/CHANGES.2.6.3.md
new file mode 100644
index 0000000..e0c4f1c
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.3/CHANGES.2.6.3.md
@@ -0,0 +1,103 @@
+
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+# Apache Hadoop Changelog
+
+## Release 2.6.3 - 2015-12-17
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
+### NEW FEATURES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-12413](https://issues.apache.org/jira/browse/HADOOP-12413) | AccessControlList should avoid calling getGroupNames in isUserInList with empty groups. |  Major | security | zhihai xu | zhihai xu |
+| [HDFS-9434](https://issues.apache.org/jira/browse/HDFS-9434) | Recommission a datanode with 500k blocks may pause NN for 30 seconds |  Major | namenode | Tsz Wo Nicholas Sze | Tsz Wo Nicholas Sze |
+
+
+### BUG FIXES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-12577](https://issues.apache.org/jira/browse/HADOOP-12577) | Bump up commons-collections version to 3.2.2 to address a security flaw |  Blocker | build, security | Wei-Chiu Chuang | Wei-Chiu Chuang |
+| [HADOOP-12526](https://issues.apache.org/jira/browse/HADOOP-12526) | [Branch-2] there are duplicate dependency definitions in pom's |  Major | build | Sangjin Lee | Sangjin Lee |
+| [HADOOP-12230](https://issues.apache.org/jira/browse/HADOOP-12230) | hadoop-project declares duplicate, conflicting curator dependencies |  Minor | build | Steve Loughran | Rakesh R |
+| [HADOOP-11267](https://issues.apache.org/jira/browse/HADOOP-11267) | TestSecurityUtil fails when run with JDK8 because of empty principal names |  Minor | security, test | Stephen Chu | Stephen Chu |
+| [HADOOP-10134](https://issues.apache.org/jira/browse/HADOOP-10134) | [JDK8] Fix Javadoc errors caused by incorrect or illegal tags in doc comments |  Minor | . | Andrew Purtell | Andrew Purtell |
+| [HADOOP-9242](https://issues.apache.org/jira/browse/HADOOP-9242) | Duplicate surefire plugin config in hadoop-common |  Major | test | Andrey Klochkov | Andrey Klochkov |
+| [HDFS-9470](https://issues.apache.org/jira/browse/HDFS-9470) | Encryption zone on root not loaded from fsimage after NN restart |  Critical | . | Xiao Chen | Xiao Chen |
+| [HDFS-9431](https://issues.apache.org/jira/browse/HDFS-9431) | DistributedFileSystem#concat fails if the target path is relative. |  Major | hdfs-client | Kazuho Fujii | Kazuho Fujii |
+| [HDFS-9289](https://issues.apache.org/jira/browse/HDFS-9289) | Make DataStreamer#block thread safe and verify genStamp in commitBlock |  Critical | . | Chang Li | Chang Li |
+| [HDFS-9273](https://issues.apache.org/jira/browse/HDFS-9273) | ACLs on root directory may be lost after NN restart |  Critical | namenode | Xiao Chen | Xiao Chen |
+| [HDFS-9083](https://issues.apache.org/jira/browse/HDFS-9083) | Replication violates block placement policy. |  Blocker | namenode | Rushabh S Shah | Rushabh S Shah |
+| [HDFS-8615](https://issues.apache.org/jira/browse/HDFS-8615) | Correct HTTP method in WebHDFS document |  Major | documentation | Akira AJISAKA | Brahma Reddy Battula |
+| [MAPREDUCE-6549](https://issues.apache.org/jira/browse/MAPREDUCE-6549) | multibyte delimiters with LineRecordReader cause duplicate records |  Major | mrv1, mrv2 | Dustin Cote | Wilfred Spiegelenburg |
+| [MAPREDUCE-6540](https://issues.apache.org/jira/browse/MAPREDUCE-6540) | TestMRTimelineEventHandling fails |  Major | test | Sangjin Lee | Sangjin Lee |
+| [MAPREDUCE-6528](https://issues.apache.org/jira/browse/MAPREDUCE-6528) | Memory leak for HistoryFileManager.getJobSummary() |  Critical | jobhistoryserver | Junping Du | Junping Du |
+| [MAPREDUCE-6481](https://issues.apache.org/jira/browse/MAPREDUCE-6481) | LineRecordReader may give incomplete record and wrong position/key information for uncompressed input sometimes. |  Critical | mrv2 | zhihai xu | zhihai xu |
+| [MAPREDUCE-6377](https://issues.apache.org/jira/browse/MAPREDUCE-6377) | JHS sorting on state column not working in webUi |  Minor | jobhistoryserver | Bibin A Chundatt | zhihai xu |
+| [MAPREDUCE-6273](https://issues.apache.org/jira/browse/MAPREDUCE-6273) | HistoryFileManager should check whether summaryFile exists to avoid FileNotFoundException causing HistoryFileInfo into MOVE\_FAILED state |  Minor | jobhistoryserver | zhihai xu | zhihai xu |
+| [MAPREDUCE-5948](https://issues.apache.org/jira/browse/MAPREDUCE-5948) | org.apache.hadoop.mapred.LineRecordReader does not handle multibyte record delimiters well |  Critical | . | Kris Geusebroek | Akira AJISAKA |
+| [MAPREDUCE-5883](https://issues.apache.org/jira/browse/MAPREDUCE-5883) | "Total megabyte-seconds" in job counters is slightly misleading |  Minor | . | Nathan Roberts | Nathan Roberts |
+| [YARN-4434](https://issues.apache.org/jira/browse/YARN-4434) | NodeManager Disk Checker parameter documentation is not correct |  Minor | documentation, nodemanager | Takashi Ohnishi | Weiwei Yang |
+| [YARN-4424](https://issues.apache.org/jira/browse/YARN-4424) | Fix deadlock in RMAppImpl |  Blocker | . | Yesha Vora | Jian He |
+| [YARN-4365](https://issues.apache.org/jira/browse/YARN-4365) | FileSystemNodeLabelStore should check for root dir existence on startup |  Major | resourcemanager | Jason Lowe | Kuhu Shukla |
+| [YARN-4348](https://issues.apache.org/jira/browse/YARN-4348) | ZKRMStateStore.syncInternal shouldn't wait for sync completion for avoiding blocking ZK's event thread |  Blocker | . | Tsuyoshi Ozawa | Tsuyoshi Ozawa |
+| [YARN-4344](https://issues.apache.org/jira/browse/YARN-4344) | NMs reconnecting with changed capabilities can lead to wrong cluster resource calculations |  Critical | resourcemanager | Varun Vasudev | Varun Vasudev |
+| [YARN-4326](https://issues.apache.org/jira/browse/YARN-4326) | Fix TestDistributedShell timeout as AHS in MiniYarnCluster no longer binds to default port 8188 |  Major | . | MENG DING | MENG DING |
+| [YARN-4320](https://issues.apache.org/jira/browse/YARN-4320) | TestJobHistoryEventHandler fails as AHS in MiniYarnCluster no longer binds to default port 8188 |  Major | . | Varun Saxena | Varun Saxena |
+| [YARN-4312](https://issues.apache.org/jira/browse/YARN-4312) | TestSubmitApplicationWithRMHA fails on branch-2.7 and branch-2.6 as some of the test cases time out |  Major | . | Varun Saxena | Varun Saxena |
+| [YARN-4241](https://issues.apache.org/jira/browse/YARN-4241) | Fix typo of property name in yarn-default.xml |  Major | documentation | Anthony Rojas | Anthony Rojas |
+| [YARN-3925](https://issues.apache.org/jira/browse/YARN-3925) | ContainerLogsUtils#getContainerLogFile fails to read container log files from full disks. |  Critical | nodemanager | zhihai xu | zhihai xu |
+| [YARN-3878](https://issues.apache.org/jira/browse/YARN-3878) | AsyncDispatcher can hang while stopping if it is configured for draining events on stop |  Critical | . | Varun Saxena | Varun Saxena |
+| [YARN-2859](https://issues.apache.org/jira/browse/YARN-2859) | ApplicationHistoryServer binds to default port 8188 in MiniYARNCluster |  Critical | timelineserver | Hitesh Shah | Vinod Kumar Vavilapalli |
+
+
+### TESTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-10668](https://issues.apache.org/jira/browse/HADOOP-10668) | TestZKFailoverControllerStress#testExpireBackAndForth occasionally fails |  Major | test | Ted Yu | Ming Ma |
+
+
+### SUB-TASKS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
+### OTHER:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.3/RELEASENOTES.2.6.3.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.3/RELEASENOTES.2.6.3.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.3/RELEASENOTES.2.6.3.md
new file mode 100644
index 0000000..e262545
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.3/RELEASENOTES.2.6.3.md
@@ -0,0 +1,21 @@
+
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+# Apache Hadoop  2.6.3 Release Notes
+
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.4/CHANGES.2.6.4.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.4/CHANGES.2.6.4.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.4/CHANGES.2.6.4.md
new file mode 100644
index 0000000..e38a93a
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.4/CHANGES.2.6.4.md
@@ -0,0 +1,114 @@
+
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+# Apache Hadoop Changelog
+
+## Release 2.6.4 - 2016-02-11
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [YARN-3154](https://issues.apache.org/jira/browse/YARN-3154) | Should not upload partial logs for MR jobs or other "short-running' applications |  Blocker | nodemanager, resourcemanager | Xuan Gong | Xuan Gong |
+
+
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
+### NEW FEATURES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-9415](https://issues.apache.org/jira/browse/HDFS-9415) | Document dfs.cluster.administrators and dfs.permissions.superusergroup |  Major | documentation | Arpit Agarwal | Xiaobing Zhou |
+| [HDFS-9314](https://issues.apache.org/jira/browse/HDFS-9314) | Improve BlockPlacementPolicyDefault's picking of excess replicas |  Major | . | Ming Ma | Xiao Chen |
+| [HDFS-8722](https://issues.apache.org/jira/browse/HDFS-8722) | Optimize datanode writes for small writes and flushes |  Critical | . | Kihwal Lee | Kihwal Lee |
+| [HDFS-8647](https://issues.apache.org/jira/browse/HDFS-8647) | Abstract BlockManager's rack policy into BlockPlacementPolicy |  Major | . | Ming Ma | Brahma Reddy Battula |
+| [HDFS-7694](https://issues.apache.org/jira/browse/HDFS-7694) | FSDataInputStream should support "unbuffer" |  Major | . | Colin Patrick McCabe | Colin Patrick McCabe |
+| [MAPREDUCE-6436](https://issues.apache.org/jira/browse/MAPREDUCE-6436) | JobHistory cache issue |  Blocker | . | Ryu Kobayashi | Kai Sasaki |
+
+
+### BUG FIXES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-12706](https://issues.apache.org/jira/browse/HADOOP-12706) | TestLocalFsFCStatistics#testStatisticsThreadLocalDataCleanUp times out occasionally |  Major | test | Jason Lowe | Sangjin Lee |
+| [HADOOP-12107](https://issues.apache.org/jira/browse/HADOOP-12107) | long running apps may have a huge number of StatisticsData instances under FileSystem |  Critical | fs | Sangjin Lee | Sangjin Lee |
+| [HADOOP-11252](https://issues.apache.org/jira/browse/HADOOP-11252) | RPC client does not time out by default |  Critical | ipc | Wilfred Spiegelenburg | Masatake Iwasaki |
+| [HDFS-9600](https://issues.apache.org/jira/browse/HDFS-9600) | do not check replication if the block is under construction |  Critical | . | Phil Yang | Phil Yang |
+| [HDFS-9574](https://issues.apache.org/jira/browse/HDFS-9574) | Reduce client failures during datanode restart |  Major | . | Kihwal Lee | Kihwal Lee |
+| [HDFS-9445](https://issues.apache.org/jira/browse/HDFS-9445) | Datanode may deadlock while handling a bad volume |  Blocker | . | Kihwal Lee | Walter Su |
+| [HDFS-9313](https://issues.apache.org/jira/browse/HDFS-9313) | Possible NullPointerException in BlockManager if no excess replica can be chosen |  Major | . | Ming Ma | Ming Ma |
+| [HDFS-9294](https://issues.apache.org/jira/browse/HDFS-9294) | DFSClient  deadlock when close file and failed to renew lease |  Blocker | hdfs-client | DENG FEI | Brahma Reddy Battula |
+| [HDFS-9220](https://issues.apache.org/jira/browse/HDFS-9220) | Reading small file (\< 512 bytes) that is open for append fails due to incorrect checksum |  Blocker | . | Bogdan Raducanu | Jing Zhao |
+| [HDFS-9178](https://issues.apache.org/jira/browse/HDFS-9178) | Slow datanode I/O can cause a wrong node to be marked bad |  Critical | . | Kihwal Lee | Kihwal Lee |
+| [HDFS-8767](https://issues.apache.org/jira/browse/HDFS-8767) | RawLocalFileSystem.listStatus() returns null for UNIX pipefile |  Critical | . | Haohui Mai | Kanaka Kumar Avvaru |
+| [HDFS-6945](https://issues.apache.org/jira/browse/HDFS-6945) | BlockManager should remove a block from excessReplicateMap and decrement ExcessBlocks metric when the block is removed |  Critical | namenode | Akira AJISAKA | Akira AJISAKA |
+| [HDFS-4660](https://issues.apache.org/jira/browse/HDFS-4660) | Block corruption can happen during pipeline recovery |  Blocker | datanode | Peng Zhang | Kihwal Lee |
+| [MAPREDUCE-6621](https://issues.apache.org/jira/browse/MAPREDUCE-6621) | Memory Leak in JobClient#submitJobInternal() |  Major | . | Xuan Gong | Xuan Gong |
+| [MAPREDUCE-6619](https://issues.apache.org/jira/browse/MAPREDUCE-6619) | HADOOP\_CLASSPATH is overwritten in MR container |  Major | mrv2 | shanyu zhao | Junping Du |
+| [MAPREDUCE-6618](https://issues.apache.org/jira/browse/MAPREDUCE-6618) | YarnClientProtocolProvider leaking the YarnClient thread. |  Major | . | Xuan Gong | Xuan Gong |
+| [MAPREDUCE-6577](https://issues.apache.org/jira/browse/MAPREDUCE-6577) | MR AM unable to load native library without MR\_AM\_ADMIN\_USER\_ENV set |  Critical | mr-am | Sangjin Lee | Sangjin Lee |
+| [MAPREDUCE-6554](https://issues.apache.org/jira/browse/MAPREDUCE-6554) | MRAppMaster servicestart failing  with NPE in MRAppMaster#parsePreviousJobHistory |  Critical | . | Bibin A Chundatt | Bibin A Chundatt |
+| [MAPREDUCE-6492](https://issues.apache.org/jira/browse/MAPREDUCE-6492) | AsyncDispatcher exit with NPE on TaskAttemptImpl#sendJHStartEventForAssignedFailTask |  Critical | . | Bibin A Chundatt | Bibin A Chundatt |
+| [MAPREDUCE-6363](https://issues.apache.org/jira/browse/MAPREDUCE-6363) | [NNBench] Lease mismatch error when running with multiple mappers |  Critical | benchmarks | Brahma Reddy Battula | Bibin A Chundatt |
+| [MAPREDUCE-5982](https://issues.apache.org/jira/browse/MAPREDUCE-5982) | Task attempts that fail from the ASSIGNED state can disappear |  Major | mr-am | Jason Lowe | Chang Li |
+| [YARN-4598](https://issues.apache.org/jira/browse/YARN-4598) | Invalid event: RESOURCE\_FAILED at CONTAINER\_CLEANEDUP\_AFTER\_KILL |  Major | nodemanager | tangshangwen | tangshangwen |
+| [YARN-4581](https://issues.apache.org/jira/browse/YARN-4581) | AHS writer thread leak makes RM crash while RM is recovering |  Major | resourcemanager | sandflee | sandflee |
+| [YARN-4546](https://issues.apache.org/jira/browse/YARN-4546) | ResourceManager crash due to scheduling opportunity overflow |  Critical | resourcemanager | Jason Lowe | Jason Lowe |
+| [YARN-4452](https://issues.apache.org/jira/browse/YARN-4452) | NPE when submit Unmanaged application |  Critical | . | Naganarasimha G R | Naganarasimha G R |
+| [YARN-4414](https://issues.apache.org/jira/browse/YARN-4414) | Nodemanager connection errors are retried at multiple levels |  Major | nodemanager | Jason Lowe | Chang Li |
+| [YARN-4380](https://issues.apache.org/jira/browse/YARN-4380) | TestResourceLocalizationService.testDownloadingResourcesOnContainerKill fails intermittently |  Major | test | Tsuyoshi Ozawa | Varun Saxena |
+| [YARN-4354](https://issues.apache.org/jira/browse/YARN-4354) | Public resource localization fails with NPE |  Blocker | nodemanager | Jason Lowe | Jason Lowe |
+| [YARN-4180](https://issues.apache.org/jira/browse/YARN-4180) | AMLauncher does not retry on failures when talking to NM |  Critical | resourcemanager | Anubhav Dhoot | Anubhav Dhoot |
+| [YARN-3857](https://issues.apache.org/jira/browse/YARN-3857) | Memory leak in ResourceManager with SIMPLE mode |  Critical | resourcemanager | mujunchao | mujunchao |
+| [YARN-3849](https://issues.apache.org/jira/browse/YARN-3849) | Too much of preemption activity causing continuos killing of containers across queues |  Critical | capacityscheduler | Sunil G | Sunil G |
+| [YARN-3842](https://issues.apache.org/jira/browse/YARN-3842) | NMProxy should retry on NMNotYetReadyException |  Critical | . | Karthik Kambatla | Robert Kanter |
+| [YARN-3697](https://issues.apache.org/jira/browse/YARN-3697) | FairScheduler: ContinuousSchedulingThread can fail to shutdown |  Critical | fairscheduler | zhihai xu | zhihai xu |
+| [YARN-3695](https://issues.apache.org/jira/browse/YARN-3695) | ServerProxy (NMProxy, etc.) shouldn't retry forever for non network exception. |  Major | . | Junping Du | Raju Bairishetti |
+| [YARN-3535](https://issues.apache.org/jira/browse/YARN-3535) | Scheduler must re-request container resources when RMContainer transitions from ALLOCATED to KILLED |  Critical | capacityscheduler, fairscheduler, resourcemanager | Peng Zhang | Peng Zhang |
+| [YARN-2975](https://issues.apache.org/jira/browse/YARN-2975) | FSLeafQueue app lists are accessed without required locks |  Blocker | . | Karthik Kambatla | Karthik Kambatla |
+
+
+### TESTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-12736](https://issues.apache.org/jira/browse/HADOOP-12736) | TestTimedOutTestsListener#testThreadDumpAndDeadlocks sometimes times out |  Major | . | Xiao Chen | Xiao Chen |
+
+
+### SUB-TASKS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [YARN-3893](https://issues.apache.org/jira/browse/YARN-3893) | Both RM in active state when Admin#transitionToActive failure from refeshAll() |  Critical | resourcemanager | Bibin A Chundatt | Bibin A Chundatt |
+| [YARN-2902](https://issues.apache.org/jira/browse/YARN-2902) | Killing a container that is localizing can orphan resources in the DOWNLOADING state |  Major | nodemanager | Jason Lowe | Varun Saxena |
+
+
+### OTHER:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.4/RELEASENOTES.2.6.4.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.4/RELEASENOTES.2.6.4.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.4/RELEASENOTES.2.6.4.md
new file mode 100644
index 0000000..3eee62b
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.4/RELEASENOTES.2.6.4.md
@@ -0,0 +1,28 @@
+
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+# Apache Hadoop  2.6.4 Release Notes
+
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
+
+
+---
+
+* [YARN-3154](https://issues.apache.org/jira/browse/YARN-3154) | *Blocker* | **Should not upload partial logs for MR jobs or other "short-running' applications**
+
+Applications which made use of the LogAggregationContext in their application will need to revisit this code in order to make sure that their logs continue to get rolled out.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.7.0/CHANGES.2.7.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.7.0/CHANGES.2.7.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.7.0/CHANGES.2.7.0.md
index 81ff61e..3e27138 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.7.0/CHANGES.2.7.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.7.0/CHANGES.2.7.0.md
@@ -24,17 +24,24 @@
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |
 |:---- |:---- | :--- |:---- |:---- |:---- |
-| [HADOOP-11498](https://issues.apache.org/jira/browse/HADOOP-11498) | Bump the version of HTrace to 3.1.0-incubating |  Major | . | Masatake Iwasaki | Masatake Iwasaki |
+| [HADOOP-11498](https://issues.apache.org/jira/browse/HADOOP-11498) | Bump the version of HTrace to 3.1.0-incubating |  Major | tracing | Masatake Iwasaki | Masatake Iwasaki |
 | [HADOOP-11492](https://issues.apache.org/jira/browse/HADOOP-11492) | Bump up curator version to 2.7.1 |  Major | . | Karthik Kambatla | Arun Suresh |
 | [HADOOP-11385](https://issues.apache.org/jira/browse/HADOOP-11385) | Prevent cross site scripting attack on JMXJSONServlet |  Critical | . | Haohui Mai | Haohui Mai |
 | [HADOOP-11311](https://issues.apache.org/jira/browse/HADOOP-11311) | Restrict uppercase key names from being created with JCEKS |  Major | security | Andrew Wang | Andrew Wang |
 | [HADOOP-10530](https://issues.apache.org/jira/browse/HADOOP-10530) | Make hadoop trunk build on Java7+ only |  Blocker | build | Steve Loughran | Steve Loughran |
+| [HDFS-7210](https://issues.apache.org/jira/browse/HDFS-7210) | Avoid two separate RPC's namenode.append() and namenode.getFileInfo() for an append call from DFSClient |  Major | hdfs-client, namenode | Vinayakumar B | Vinayakumar B |
 | [HDFS-6651](https://issues.apache.org/jira/browse/HDFS-6651) | Deletion failure can leak inodes permanently |  Critical | . | Kihwal Lee | Jing Zhao |
 | [HDFS-6252](https://issues.apache.org/jira/browse/HDFS-6252) | Phase out the old web UI in HDFS |  Minor | namenode | Fengdong Yu | Haohui Mai |
 | [YARN-3217](https://issues.apache.org/jira/browse/YARN-3217) | Remove httpclient dependency from hadoop-yarn-server-web-proxy |  Major | . | Akira AJISAKA | Brahma Reddy Battula |
 | [YARN-3154](https://issues.apache.org/jira/browse/YARN-3154) | Should not upload partial logs for MR jobs or other "short-running' applications |  Blocker | nodemanager, resourcemanager | Xuan Gong | Xuan Gong |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |
@@ -125,7 +132,6 @@
 | [HADOOP-10976](https://issues.apache.org/jira/browse/HADOOP-10976) | moving the source code of hadoop-tools docs to the directory under hadoop-tools |  Minor | documentation | Masatake Iwasaki | Masatake Iwasaki |
 | [HADOOP-10847](https://issues.apache.org/jira/browse/HADOOP-10847) | Remove the usage of sun.security.x509.\* in testing code |  Minor | security | Kai Zheng | pascal oliva |
 | [HADOOP-10809](https://issues.apache.org/jira/browse/HADOOP-10809) | hadoop-azure: page blob support |  Major | tools | Mike Liddell | Eric Hanson |
-| [HADOOP-10786](https://issues.apache.org/jira/browse/HADOOP-10786) | Fix UGI#reloginFromKeytab on Java 8 |  Major | security | Tobi Vollebregt | Stephen Chu |
 | [HADOOP-10670](https://issues.apache.org/jira/browse/HADOOP-10670) | Allow AuthenticationFilters to load secret from signature secret files |  Minor | security | Kai Zheng | Kai Zheng |
 | [HADOOP-10626](https://issues.apache.org/jira/browse/HADOOP-10626) | Limit Returning Attributes for LDAP search |  Major | security | Jason Hubbard | Jason Hubbard |
 | [HADOOP-10563](https://issues.apache.org/jira/browse/HADOOP-10563) | Remove the dependency of jsp in trunk |  Major | . | Haohui Mai | Haohui Mai |
@@ -221,7 +227,6 @@
 | [HDFS-7252](https://issues.apache.org/jira/browse/HDFS-7252) | small refinement to the use of isInAnEZ in FSNamesystem |  Trivial | . | Yi Liu | Yi Liu |
 | [HDFS-7242](https://issues.apache.org/jira/browse/HDFS-7242) | Code improvement for FSN#checkUnreadableBySuperuser |  Minor | namenode | Yi Liu | Yi Liu |
 | [HDFS-7223](https://issues.apache.org/jira/browse/HDFS-7223) | Tracing span description of IPC client is too long |  Minor | . | Masatake Iwasaki | Masatake Iwasaki |
-| [HDFS-7210](https://issues.apache.org/jira/browse/HDFS-7210) | Avoid two separate RPC's namenode.append() and namenode.getFileInfo() for an append call from DFSClient |  Major | hdfs-client, namenode | Vinayakumar B | Vinayakumar B |
 | [HDFS-7190](https://issues.apache.org/jira/browse/HDFS-7190) | Bad use of Preconditions in startFileInternal() |  Major | namenode | Konstantin Shvachko | Dawson Choong |
 | [HDFS-7186](https://issues.apache.org/jira/browse/HDFS-7186) | Document the "hadoop trace" command. |  Minor | documentation | Masatake Iwasaki | Masatake Iwasaki |
 | [HDFS-7182](https://issues.apache.org/jira/browse/HDFS-7182) | JMX metrics aren't accessible when NN is busy |  Major | . | Ming Ma | Ming Ma |
@@ -231,7 +236,7 @@
 | [HDFS-6741](https://issues.apache.org/jira/browse/HDFS-6741) | Improve permission denied message when FSPermissionChecker#checkOwner fails |  Trivial | . | Stephen Chu | Harsh J |
 | [HDFS-6735](https://issues.apache.org/jira/browse/HDFS-6735) | A minor optimization to avoid pread() be blocked by read() inside the same DFSInputStream |  Major | hdfs-client | Liang Xie | Lars Hofhansl |
 | [HDFS-6565](https://issues.apache.org/jira/browse/HDFS-6565) | Use jackson instead jetty json in hdfs-client |  Major | . | Haohui Mai | Akira AJISAKA |
-| [HDFS-6133](https://issues.apache.org/jira/browse/HDFS-6133) | Make Balancer support exclude specified path |  Major | balancer & mover, datanode | zhaoyunjiong | zhaoyunjiong |
+| [HDFS-6133](https://issues.apache.org/jira/browse/HDFS-6133) | Add a feature for replica pinning so that a pinned replica will not be moved by Balancer/Mover. |  Major | balancer & mover, datanode | zhaoyunjiong | zhaoyunjiong |
 | [HDFS-5853](https://issues.apache.org/jira/browse/HDFS-5853) | Add "hadoop.user.group.metrics.percentiles.intervals" to hdfs-default.xml |  Minor | documentation, namenode | Akira AJISAKA | Akira AJISAKA |
 | [HDFS-3342](https://issues.apache.org/jira/browse/HDFS-3342) | SocketTimeoutException in BlockSender.sendChunks could have a better error message |  Minor | datanode | Todd Lipcon | Yongjun Zhang |
 | [HDFS-2219](https://issues.apache.org/jira/browse/HDFS-2219) | Fsck should work with fully qualified file paths. |  Minor | tools | Jitendra Nath Pandey | Tsz Wo Nicholas Sze |
@@ -449,6 +454,7 @@
 | [HADOOP-10953](https://issues.apache.org/jira/browse/HADOOP-10953) | NetworkTopology#add calls NetworkTopology#toString without holding the netlock |  Minor | net | Liang Xie | Liang Xie |
 | [HADOOP-10852](https://issues.apache.org/jira/browse/HADOOP-10852) | NetgroupCache is not thread-safe |  Major | security | Benoy Antony | Benoy Antony |
 | [HADOOP-10840](https://issues.apache.org/jira/browse/HADOOP-10840) | Fix OutOfMemoryError caused by metrics system in Azure File System |  Major | metrics | shanyu zhao | shanyu zhao |
+| [HADOOP-10786](https://issues.apache.org/jira/browse/HADOOP-10786) | Fix UGI#reloginFromKeytab on Java 8 |  Major | security | Tobi Vollebregt | Stephen Chu |
 | [HADOOP-10748](https://issues.apache.org/jira/browse/HADOOP-10748) | HttpServer2 should not load JspServlet |  Major | . | Haohui Mai | Haohui Mai |
 | [HADOOP-10717](https://issues.apache.org/jira/browse/HADOOP-10717) | HttpServer2 should load jsp DTD from local jars instead of going remote |  Blocker | . | Dapeng Sun | Dapeng Sun |
 | [HADOOP-10714](https://issues.apache.org/jira/browse/HADOOP-10714) | AmazonS3Client.deleteObjects() need to be limited to 1000 entries per call |  Critical | fs/s3 | David S. Wang | Juan Yu |
@@ -479,7 +485,7 @@
 | [HDFS-7960](https://issues.apache.org/jira/browse/HDFS-7960) | The full block report should prune zombie storages even if they're not empty |  Critical | . | Lei (Eddy) Xu | Colin Patrick McCabe |
 | [HDFS-7957](https://issues.apache.org/jira/browse/HDFS-7957) | Truncate should verify quota before making changes |  Critical | namenode | Jing Zhao | Jing Zhao |
 | [HDFS-7956](https://issues.apache.org/jira/browse/HDFS-7956) | Improve logging for DatanodeRegistration. |  Major | namenode | Konstantin Shvachko | Plamen Jeliazkov |
-| [HDFS-7953](https://issues.apache.org/jira/browse/HDFS-7953) | NN Web UI fails to navigate to paths that contain # |  Minor | namenode | kanaka kumar avvaru | kanaka kumar avvaru |
+| [HDFS-7953](https://issues.apache.org/jira/browse/HDFS-7953) | NN Web UI fails to navigate to paths that contain # |  Minor | namenode | Kanaka Kumar Avvaru | Kanaka Kumar Avvaru |
 | [HDFS-7945](https://issues.apache.org/jira/browse/HDFS-7945) | The WebHdfs system on DN does not honor the length parameter |  Blocker | . | Haohui Mai | Haohui Mai |
 | [HDFS-7943](https://issues.apache.org/jira/browse/HDFS-7943) | Append cannot handle the last block with length greater than the preferred block size |  Blocker | . | Jing Zhao | Jing Zhao |
 | [HDFS-7942](https://issues.apache.org/jira/browse/HDFS-7942) | NFS: support regexp grouping in nfs.exports.allowed.hosts |  Major | nfs | Brandon Li | Brandon Li |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.7.0/RELEASENOTES.2.7.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.7.0/RELEASENOTES.2.7.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.7.0/RELEASENOTES.2.7.0.md
index e429b22..d8c4b7c 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.7.0/RELEASENOTES.2.7.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.7.0/RELEASENOTES.2.7.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  2.7.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---
@@ -30,13 +30,6 @@ ProtocolBuffer is packaged in Ubuntu
 
 ---
 
-* [HADOOP-11729](https://issues.apache.org/jira/browse/HADOOP-11729) | *Minor* | **Fix link to cgroups doc in site.xml**
-
-Committed this to trunk, branch-2, and branch-2.7. Thanks Masatake for your contribution!
-
-
----
-
 * [HADOOP-11498](https://issues.apache.org/jira/browse/HADOOP-11498) | *Major* | **Bump the version of HTrace to 3.1.0-incubating**
 
 **WARNING: No release note provided for this incompatible change.**
@@ -44,21 +37,16 @@ Committed this to trunk, branch-2, and branch-2.7. Thanks Masatake for your cont
 
 ---
 
-* [HADOOP-11497](https://issues.apache.org/jira/browse/HADOOP-11497) | *Major* | **Fix typo in ClusterSetup.html#Hadoop\_Startup**
-
-Correct startup command for cluster data nodes
-
-
----
-
 * [HADOOP-11492](https://issues.apache.org/jira/browse/HADOOP-11492) | *Major* | **Bump up curator version to 2.7.1**
 
+<!-- markdown -->
 Apache Curator version change: Apache Hadoop has updated the version of Apache Curator used from 2.6.0 to 2.7.1. This change should be binary and source compatible for the majority of downstream users. Notable exceptions:
-# Binary incompatible change: org.apache.curator.utils.PathUtils.validatePath(String) changed return types. Downstream users of this method will need to recompile.
-# Source incompatible change: org.apache.curator.framework.recipes.shared.SharedCountReader added a method to its interface definition. Downstream users with custom implementations of this interface can continue without binary compatibility problems but will need to modify their source code to recompile.
-# Source incompatible change: org.apache.curator.framework.recipes.shared.SharedValueReader added a method to its interface definition. Downstream users with custom implementations of this interface can continue without binary compatibility problems but will need to modify their source code to recompile.
 
-Downstream users are reminded that while the Hadoop community will attempt to avoid egregious incompatible dependency changes, there is currently no policy around when Hadoop's exposed dependencies will change across versions (ref http://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/Compatibility.html#Java\_Classpath).
+* Binary incompatible change: org.apache.curator.utils.PathUtils.validatePath(String) changed return types. Downstream users of this method will need to recompile.
+* Source incompatible change: org.apache.curator.framework.recipes.shared.SharedCountReader added a method to its interface definition. Downstream users with custom implementations of this interface can continue without binary compatibility problems but will need to modify their source code to recompile.
+* Source incompatible change: org.apache.curator.framework.recipes.shared.SharedValueReader added a method to its interface definition. Downstream users with custom implementations of this interface can continue without binary compatibility problems but will need to modify their source code to recompile.
+
+Downstream users are reminded that while the Hadoop community will attempt to avoid egregious incompatible dependency changes, there is currently no policy around when Hadoop's exposed dependencies will change across versions (ref http://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/Compatibility.html#Java_Classpath ).
 
 
 ---
@@ -88,13 +76,6 @@ fs.s3a.max.total.tasks:    the maximum number of tasks that the LinkedBlockingQu
 
 ---
 
-* [HADOOP-11348](https://issues.apache.org/jira/browse/HADOOP-11348) | *Minor* | **Remove unused variable from CMake error message for finding openssl**
-
-Test failure is unrelated.  Committed to 2.7.  Thanks, Dian.
-
-
----
-
 * [HADOOP-11311](https://issues.apache.org/jira/browse/HADOOP-11311) | *Major* | **Restrict uppercase key names from being created with JCEKS**
 
 Keys with uppercase names can no longer be created when using the JavaKeyStoreProvider to resolve ambiguity about case-sensitivity in the KeyStore spec.
@@ -148,13 +129,6 @@ New fs -find command
 
 ---
 
-* [HDFS-8001](https://issues.apache.org/jira/browse/HDFS-8001) | *Trivial* | **RpcProgramNfs3 : wrong parsing of dfs.blocksize**
-
-patch is fully backward compatible.
-
-
----
-
 * [HDFS-7806](https://issues.apache.org/jira/browse/HDFS-7806) | *Minor* | **Refactor: move StorageType from hadoop-hdfs to hadoop-common**
 
 This fix moves the public class StorageType from the package org.apache.hadoop.hdfs to org.apache.hadoop.fs.
@@ -177,13 +151,6 @@ LibHDFS now supports 32-bit build targets on Windows.
 
 ---
 
-* [HDFS-7457](https://issues.apache.org/jira/browse/HDFS-7457) | *Major* | **DatanodeID generates excessive garbage**
-
-Thanks for the reviews, gentlemen. I've committed this to trunk and branch-2. Thanks for identifying and working on the issue, Daryn.
-
-
----
-
 * [HDFS-7411](https://issues.apache.org/jira/browse/HDFS-7411) | *Major* | **Refactor and improve decommissioning logic into DecommissionManager**
 
 This change introduces a new configuration key used to throttle decommissioning work, "dfs.namenode.decommission.blocks.per.interval". This new key overrides and deprecates the previous related configuration key "dfs.namenode.decommission.nodes.per.interval". The new key is intended to result in more predictable pause times while scanning decommissioning nodes.
@@ -191,17 +158,16 @@ This change introduces a new configuration key used to throttle decommissioning
 
 ---
 
-* [HDFS-7326](https://issues.apache.org/jira/browse/HDFS-7326) | *Minor* | **Add documentation for hdfs debug commands**
+* [HDFS-7270](https://issues.apache.org/jira/browse/HDFS-7270) | *Major* | **Add congestion signaling capability to DataNode write protocol**
 
-Added documentation for the hdfs debug commands to the following URL in the documentation website.
+Introduced a new configuration dfs.pipeline.ecn. When the configuration is turned on, DataNodes will signal in the writing pipelines when they are overloaded. The client can back off based on this congestion signal to avoid overloading the system.
 
-hadoop-project-dist/hadoop-hdfs/HDFSCommands.html
 
-In order to view the new documentation, build the website in a staging area:
-$ mvn clean site; mvn site:stage -DstagingDirectory=/tmp/hadoop-site
+---
+
+* [HDFS-7210](https://issues.apache.org/jira/browse/HDFS-7210) | *Major* | **Avoid two separate RPC's namenode.append() and namenode.getFileInfo() for an append call from DFSClient**
 
-Point your browser to 
-file:///tmp/hadoop-site/hadoop-project/hadoop-project-dist/hadoop-hdfs/HDFSCommands.html
+**WARNING: No release note provided for this incompatible change.**
 
 
 ---
@@ -220,9 +186,9 @@ file:///tmp/hadoop-site/hadoop-project/hadoop-project-dist/hadoop-hdfs/HDFSComma
 
 ---
 
-* [HDFS-6133](https://issues.apache.org/jira/browse/HDFS-6133) | *Major* | **Make Balancer support exclude specified path**
+* [HDFS-6133](https://issues.apache.org/jira/browse/HDFS-6133) | *Major* | **Add a feature for replica pinning so that a pinned replica will not be moved by Balancer/Mover.**
 
-Add a feature for replica pinning so that when a replica is pinned in a datanode, it will not be moved by Balancer/Mover.  The replica pinning feature can be enabled/disabled by "dfs.datanode.block-pinning.enabled", where the default is false.
+Add a feature for replica pinning so that when a replica is pinned in a datanode, it will not be moved by Balancer/Mover. The replica pinning feature can be enabled/disabled by "dfs.datanode.block-pinning.enabled", where the default is false.
 
 
 ---
@@ -252,6 +218,7 @@ Based on the reconfiguration framework provided by HADOOP-7001, enable reconfigu
 
 * [MAPREDUCE-5583](https://issues.apache.org/jira/browse/MAPREDUCE-5583) | *Major* | **Ability to limit running map and reduce tasks**
 
+<!-- markdown -->
 This introduces two new MR2 job configs, mentioned below, which allow users to control the maximum simultaneously-running tasks of the submitted job, across the cluster:
 
 * mapreduce.job.running.map.limit (default: 0, for no limit)
@@ -274,22 +241,4 @@ Removed commons-httpclient dependency from hadoop-yarn-server-web-proxy module.
 Applications which made use of the LogAggregationContext in their application will need to revisit this code in order to make sure that their logs continue to get rolled out.
 
 
----
-
-* [YARN-2230](https://issues.apache.org/jira/browse/YARN-2230) | *Minor* | **Fix description of yarn.scheduler.maximum-allocation-vcores in yarn-default.xml (or code)**
-
-I have modified the description of the yarn.scheduler.maximum-allocation-vcores setting in yarn-default.xml to be reflective of the actual behavior (throw InvalidRequestException when the limit is crossed).
-
-Since this is a documentation change, I have not added any test cases.
-
-Please review the patch, thanks!
-
-
----
-
-* [YARN-1904](https://issues.apache.org/jira/browse/YARN-1904) | *Major* | **Uniform the XXXXNotFound messages from ClientRMService and ApplicationHistoryClientService**
-
-I just committed this. Thanks Zhijie!
-
-
 


[10/34] hadoop git commit: HDFS-9865. TestBlockReplacement fails intermittently in trunk (Lin Yiqun via iwasakims)

Posted by ar...@apache.org.
HDFS-9865. TestBlockReplacement fails intermittently in trunk (Lin Yiqun via iwasakims)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d718fc1e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d718fc1e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d718fc1e

Branch: refs/heads/HDFS-1312
Commit: d718fc1ee5aee3628e105339ee3ea183b6242409
Parents: 4163e36
Author: Masatake Iwasaki <iw...@apache.org>
Authored: Tue Mar 8 02:07:18 2016 +0900
Committer: Masatake Iwasaki <iw...@apache.org>
Committed: Tue Mar 8 02:07:18 2016 +0900

----------------------------------------------------------------------
 .../server/datanode/TestBlockReplacement.java     | 18 +++++++++++++-----
 1 file changed, 13 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d718fc1e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
index bfd02e2..286a180 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
@@ -412,11 +412,19 @@ public class TestBlockReplacement {
           (DatanodeInfo)sourceDnDesc, (DatanodeInfo)sourceDnDesc,
           (DatanodeInfo)destDnDesc));
       // Waiting for the FsDatasetAsyncDsikService to delete the block
-      Thread.sleep(3000);
-      // Triggering the incremental block report to report the deleted block to
-      // namnemode
-      cluster.getDataNodes().get(0).triggerBlockReport(
-         new BlockReportOptions.Factory().setIncremental(true).build());
+      for (int tries = 0; tries < 20; tries++) {
+        Thread.sleep(1000);
+        // Triggering the deletion block report to report the deleted block
+        // to namnemode
+        DataNodeTestUtils.triggerDeletionReport(cluster.getDataNodes().get(0));
+        locatedBlocks =
+            client.getNamenode().getBlockLocations("/tmp.txt", 0, 10L)
+                .getLocatedBlocks();
+        // If block was deleted and only on 1 datanode then break out
+        if (locatedBlocks.get(0).getLocations().length == 1) {
+          break;
+        }
+      }
 
       cluster.transitionToStandby(0);
       cluster.transitionToActive(1);


[14/34] hadoop git commit: HDFS-9812. Streamer threads leak if failure happens when closing DFSOutputStream. Contributed by Lin Yiqun.

Posted by ar...@apache.org.
HDFS-9812. Streamer threads leak if failure happens when closing DFSOutputStream. Contributed by Lin Yiqun.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/352d299c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/352d299c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/352d299c

Branch: refs/heads/HDFS-1312
Commit: 352d299cf8ebe330d24117df98d1e6a64ae38c26
Parents: 391da36
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue Mar 8 10:43:17 2016 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue Mar 8 10:43:17 2016 +0900

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/hdfs/DFSOutputStream.java   | 9 +++++++--
 .../src/main/java/org/apache/hadoop/hdfs/DataStreamer.java  | 8 ++++----
 2 files changed, 11 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/352d299c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 1c58b28..dc88e08 100755
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -770,14 +770,19 @@ public class DFSOutputStream extends FSOutputSummer
       flushInternal();             // flush all data to Datanodes
       // get last block before destroying the streamer
       ExtendedBlock lastBlock = getStreamer().getBlock();
-      closeThreads(false);
+
       try (TraceScope ignored =
                dfsClient.getTracer().newScope("completeFile")) {
         completeFile(lastBlock);
       }
     } catch (ClosedChannelException ignored) {
     } finally {
-      setClosed();
+      // Failures may happen when flushing data.
+      // Streamers may keep waiting for the new block information.
+      // Thus need to force closing these threads.
+      // Don't need to call setClosed() because closeThreads(true)
+      // calls setClosed() in the finally block.
+      closeThreads(true);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/352d299c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index 9d3cb55..9ae443d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -507,7 +507,7 @@ class DataStreamer extends Daemon {
   }
 
   protected void endBlock() {
-    LOG.debug("Closing old block " + block);
+    LOG.debug("Closing old block {}", block);
     this.setName("DataStreamer for file " + src);
     closeResponder();
     closeStream();
@@ -591,7 +591,7 @@ class DataStreamer extends Daemon {
           LOG.debug("stage=" + stage + ", " + this);
         }
         if (stage == BlockConstructionStage.PIPELINE_SETUP_CREATE) {
-          LOG.debug("Allocating new block: " + this);
+          LOG.debug("Allocating new block: {}", this);
           setPipeline(nextBlockOutputStream());
           initDataStreaming();
         } else if (stage == BlockConstructionStage.PIPELINE_SETUP_APPEND) {
@@ -644,7 +644,7 @@ class DataStreamer extends Daemon {
           }
         }
 
-        LOG.debug(this + " sending " + one);
+        LOG.debug("{} sending {}", this, one);
 
         // write out data to remote datanode
         try (TraceScope ignored = dfsClient.getTracer().
@@ -1766,7 +1766,7 @@ class DataStreamer extends Daemon {
       packet.addTraceParent(Tracer.getCurrentSpanId());
       dataQueue.addLast(packet);
       lastQueuedSeqno = packet.getSeqno();
-      LOG.debug("Queued " + packet + ", " + this);
+      LOG.debug("Queued {}, {}", packet, this);
       dataQueue.notifyAll();
     }
   }


[20/34] hadoop git commit: YARN-4764. Application submission fails when submitted queue is not available in scheduler xml. Contributed by Bibin A Chundatt

Posted by ar...@apache.org.
YARN-4764. Application submission fails when submitted queue is not available in scheduler xml. Contributed by Bibin A Chundatt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c33158d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c33158d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c33158d

Branch: refs/heads/HDFS-1312
Commit: 3c33158d1cb38ee4ab3baa21752a3cdf0bdc8ccc
Parents: a14a6f0
Author: Jian He <ji...@apache.org>
Authored: Tue Mar 8 13:07:57 2016 -0800
Committer: Jian He <ji...@apache.org>
Committed: Tue Mar 8 13:07:57 2016 -0800

----------------------------------------------------------------------
 .../server/resourcemanager/RMAppManager.java    | 45 +++++++++++---------
 .../resourcemanager/TestApplicationACLs.java    | 36 ++++++++++++++++
 2 files changed, 60 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c33158d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index 7d6120f..78d6ebe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -17,7 +17,11 @@
  */
 package org.apache.hadoop.yarn.server.resourcemanager;
 
-import com.google.common.annotations.VisibleForTesting;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.LinkedList;
+import java.util.Map;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -55,14 +59,12 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.LinkedList;
-import java.util.Map;
+import com.google.common.annotations.VisibleForTesting;
 
 /**
  * This class manages the list of applications for the resource manager. 
@@ -360,22 +362,23 @@ public class RMAppManager implements EventHandler<RMAppManagerEvent>,
     // mapping should be done outside scheduler too like CS.
     // For now, exclude FS for the acl check.
     if (!isRecovery && YarnConfiguration.isAclEnabled(conf)
-        && scheduler instanceof CapacityScheduler &&
-        !authorizer.checkPermission(new AccessRequest(
-            ((CapacityScheduler) scheduler)
-                .getQueue(submissionContext.getQueue()).getPrivilegedEntity(),
-            userUgi, SchedulerUtils.toAccessType(QueueACL.SUBMIT_APPLICATIONS),
-            submissionContext.getApplicationId().toString(),
-            submissionContext.getApplicationName())) &&
-        !authorizer.checkPermission(new AccessRequest(
-            ((CapacityScheduler) scheduler)
-                .getQueue(submissionContext.getQueue()).getPrivilegedEntity(),
-            userUgi, SchedulerUtils.toAccessType(QueueACL.ADMINISTER_QUEUE),
-            submissionContext.getApplicationId().toString(),
-            submissionContext.getApplicationName()))) {
-      throw new AccessControlException(
-          "User " + user + " does not have permission to submit "
-              + applicationId + " to queue " + submissionContext.getQueue());
+        && scheduler instanceof CapacityScheduler) {
+      String queueName = submissionContext.getQueue();
+      String appName = submissionContext.getApplicationName();
+      CSQueue csqueue = ((CapacityScheduler) scheduler).getQueue(queueName);
+      if (null != csqueue
+          && !authorizer.checkPermission(
+              new AccessRequest(csqueue.getPrivilegedEntity(), userUgi,
+                  SchedulerUtils.toAccessType(QueueACL.SUBMIT_APPLICATIONS),
+                  applicationId.toString(), appName))
+          && !authorizer.checkPermission(
+              new AccessRequest(csqueue.getPrivilegedEntity(), userUgi,
+                  SchedulerUtils.toAccessType(QueueACL.ADMINISTER_QUEUE),
+                  applicationId.toString(), appName))) {
+        throw new AccessControlException(
+            "User " + user + " does not have permission to submit "
+                + applicationId + " to queue " + submissionContext.getQueue());
+      }
     }
 
     // Create RMApp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c33158d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java
index ea0d448..e4befa6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.service.Service.STATE;
 import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
@@ -179,6 +180,8 @@ public class TestApplicationACLs {
     verifyEnemyAccess();
 
     verifyAdministerQueueUserAccess();
+
+    verifyInvalidQueueWithAcl();
   }
 
   @SuppressWarnings("deprecation")
@@ -392,6 +395,39 @@ public class TestApplicationACLs {
         -1, usageReport.getNeededResources().getMemory());
   }
 
+  private void verifyInvalidQueueWithAcl() throws Exception {
+    isQueueUser = true;
+    SubmitApplicationRequest submitRequest =
+        recordFactory.newRecordInstance(SubmitApplicationRequest.class);
+    ApplicationSubmissionContext context =
+        recordFactory.newRecordInstance(ApplicationSubmissionContext.class);
+    ApplicationId applicationId = rmClient
+        .getNewApplication(
+            recordFactory.newRecordInstance(GetNewApplicationRequest.class))
+        .getApplicationId();
+    context.setApplicationId(applicationId);
+    Map<ApplicationAccessType, String> acls =
+        new HashMap<ApplicationAccessType, String>();
+    ContainerLaunchContext amContainer =
+        recordFactory.newRecordInstance(ContainerLaunchContext.class);
+    Resource resource = BuilderUtils.newResource(1024, 1);
+    context.setResource(resource);
+    amContainer.setApplicationACLs(acls);
+    context.setQueue("InvalidQueue");
+    context.setAMContainerSpec(amContainer);
+    submitRequest.setApplicationSubmissionContext(context);
+    rmClient.submitApplication(submitRequest);
+    resourceManager.waitForState(applicationId, RMAppState.FAILED);
+    final GetApplicationReportRequest appReportRequest =
+        recordFactory.newRecordInstance(GetApplicationReportRequest.class);
+    appReportRequest.setApplicationId(applicationId);
+    GetApplicationReportResponse applicationReport =
+        rmClient.getApplicationReport(appReportRequest);
+    ApplicationReport appReport = applicationReport.getApplicationReport();
+    Assert.assertTrue(appReport.getDiagnostics()
+        .contains("submitted by user owner to unknown queue: InvalidQueue"));
+  }
+
   private void verifyAdministerQueueUserAccess() throws Exception {
     isQueueUser = true;
     AccessControlList viewACL = new AccessControlList("");


[13/34] hadoop git commit: HADOOP-12901. Add warning log when KMSClientProvider cannot create a connection to the KMS server. (Xiao Chen via wang)

Posted by ar...@apache.org.
HADOOP-12901. Add warning log when KMSClientProvider cannot create a connection to the KMS server. (Xiao Chen via wang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/391da36d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/391da36d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/391da36d

Branch: refs/heads/HDFS-1312
Commit: 391da36d93358038c50c15d91543f6c765fa0471
Parents: 724d229
Author: Andrew Wang <wa...@apache.org>
Authored: Mon Mar 7 14:00:02 2016 -0800
Committer: Andrew Wang <wa...@apache.org>
Committed: Mon Mar 7 14:00:02 2016 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/crypto/key/kms/KMSClientProvider.java  | 8 ++++++++
 1 file changed, 8 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/391da36d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 7682888..b894c7f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -42,6 +42,8 @@ import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthentica
 import org.apache.hadoop.util.HttpExceptionUtils;
 import org.apache.http.client.utils.URIBuilder;
 import org.codehaus.jackson.map.ObjectMapper;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import javax.net.ssl.HttpsURLConnection;
 
@@ -85,6 +87,9 @@ import com.google.common.base.Strings;
 public class KMSClientProvider extends KeyProvider implements CryptoExtension,
     KeyProviderDelegationTokenExtension.DelegationTokenExtension {
 
+  private static final Logger LOG =
+      LoggerFactory.getLogger(KMSClientProvider.class);
+
   private static final String INVALID_SIGNATURE = "Invalid signature";
 
   private static final String ANONYMOUS_REQUESTS_DISALLOWED = "Anonymous requests are disallowed";
@@ -491,6 +496,9 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
         }
       });
     } catch (IOException ex) {
+      if (ex instanceof SocketTimeoutException) {
+        LOG.warn("Failed to connect to {}:{}", url.getHost(), url.getPort());
+      }
       throw ex;
     } catch (UndeclaredThrowableException ex) {
       throw new IOException(ex.getUndeclaredThrowable());


[11/34] hadoop git commit: YARN-4762. Fixed CgroupHandler's creation and usage to avoid NodeManagers crashing when LinuxContainerExecutor is enabled. (Sidharta Seethana via vinodkv)

Posted by ar...@apache.org.
YARN-4762. Fixed CgroupHandler's creation and usage to avoid NodeManagers crashing when LinuxContainerExecutor is enabled. (Sidharta Seethana via vinodkv)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b2661765
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b2661765
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b2661765

Branch: refs/heads/HDFS-1312
Commit: b2661765a5a48392a5691cee15904ed2de147b00
Parents: d718fc1
Author: Vinod Kumar Vavilapalli <vi...@apache.org>
Authored: Mon Mar 7 11:08:17 2016 -0800
Committer: Vinod Kumar Vavilapalli <vi...@apache.org>
Committed: Mon Mar 7 11:08:17 2016 -0800

----------------------------------------------------------------------
 .../linux/resources/ResourceHandlerModule.java  | 27 +++++++++++++++-----
 .../DelegatingLinuxContainerRuntime.java        | 13 +---------
 .../runtime/DockerLinuxContainerRuntime.java    | 27 +++++++++++++++++++-
 .../runtime/TestDockerContainerRuntime.java     | 15 +++++++++++
 4 files changed, 62 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2661765/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java
index 7507a82..7fc04bd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java
@@ -63,7 +63,7 @@ public class ResourceHandlerModule {
   /**
    * Returns an initialized, thread-safe CGroupsHandler instance.
    */
-  public static CGroupsHandler getCGroupsHandler(Configuration conf)
+  private static CGroupsHandler getInitializedCGroupsHandler(Configuration conf)
       throws ResourceHandlerException {
     if (cGroupsHandler == null) {
       synchronized (CGroupsHandler.class) {
@@ -77,7 +77,17 @@ public class ResourceHandlerModule {
     return cGroupsHandler;
   }
 
-  private static CGroupsCpuResourceHandlerImpl getcGroupsCpuResourceHandler(
+  /**
+   * Returns a (possibly null) reference to a cGroupsHandler. This handler is
+   * non-null only if one or more of the known cgroups-based resource
+   * handlers are in use and have been initialized.
+   */
+
+  public static CGroupsHandler getCGroupsHandler() {
+    return cGroupsHandler;
+  }
+
+  private static CGroupsCpuResourceHandlerImpl getCGroupsCpuResourceHandler(
       Configuration conf) throws ResourceHandlerException {
     boolean cgroupsCpuEnabled =
         conf.getBoolean(YarnConfiguration.NM_CPU_RESOURCE_ENABLED,
@@ -92,7 +102,8 @@ public class ResourceHandlerModule {
           if (cGroupsCpuResourceHandler == null) {
             LOG.debug("Creating new cgroups cpu handler");
             cGroupsCpuResourceHandler =
-                new CGroupsCpuResourceHandlerImpl(getCGroupsHandler(conf));
+                new CGroupsCpuResourceHandlerImpl(
+                    getInitializedCGroupsHandler(conf));
             return cGroupsCpuResourceHandler;
           }
         }
@@ -112,7 +123,7 @@ public class ResourceHandlerModule {
             LOG.debug("Creating new traffic control bandwidth handler");
             trafficControlBandwidthHandler = new
                 TrafficControlBandwidthHandlerImpl(PrivilegedOperationExecutor
-                .getInstance(conf), getCGroupsHandler(conf),
+                .getInstance(conf), getInitializedCGroupsHandler(conf),
                 new TrafficController(conf, PrivilegedOperationExecutor
                     .getInstance(conf)));
           }
@@ -147,7 +158,8 @@ public class ResourceHandlerModule {
         if (cGroupsBlkioResourceHandler == null) {
           LOG.debug("Creating new cgroups blkio handler");
           cGroupsBlkioResourceHandler =
-              new CGroupsBlkioResourceHandlerImpl(getCGroupsHandler(conf));
+              new CGroupsBlkioResourceHandlerImpl(
+                  getInitializedCGroupsHandler(conf));
         }
       }
     }
@@ -170,7 +182,8 @@ public class ResourceHandlerModule {
       synchronized (MemoryResourceHandler.class) {
         if (cGroupsMemoryResourceHandler == null) {
           cGroupsMemoryResourceHandler =
-              new CGroupsMemoryResourceHandlerImpl(getCGroupsHandler(conf));
+              new CGroupsMemoryResourceHandlerImpl(
+                  getInitializedCGroupsHandler(conf));
         }
       }
     }
@@ -191,7 +204,7 @@ public class ResourceHandlerModule {
     addHandlerIfNotNull(handlerList, getOutboundBandwidthResourceHandler(conf));
     addHandlerIfNotNull(handlerList, getDiskResourceHandler(conf));
     addHandlerIfNotNull(handlerList, getMemoryResourceHandler(conf));
-    addHandlerIfNotNull(handlerList, getcGroupsCpuResourceHandler(conf));
+    addHandlerIfNotNull(handlerList, getCGroupsCpuResourceHandler(conf));
     resourceHandlerChain = new ResourceHandlerChain(handlerList);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2661765/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java
index 7adba4d..75abfb0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java
@@ -27,9 +27,6 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
-import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler;
-import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerException;
-import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerModule;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext;
 
@@ -48,19 +45,11 @@ public class DelegatingLinuxContainerRuntime implements LinuxContainerRuntime {
       throws ContainerExecutionException {
     PrivilegedOperationExecutor privilegedOperationExecutor =
         PrivilegedOperationExecutor.getInstance(conf);
-    CGroupsHandler cGroupsHandler;
-    try {
-      cGroupsHandler = ResourceHandlerModule.getCGroupsHandler(conf);
-    } catch (ResourceHandlerException e) {
-      LOG.error("Unable to get cgroups handle.");
-      throw new ContainerExecutionException(e);
-    }
-
     defaultLinuxContainerRuntime = new DefaultLinuxContainerRuntime(
         privilegedOperationExecutor);
     defaultLinuxContainerRuntime.initialize(conf);
     dockerLinuxContainerRuntime = new DockerLinuxContainerRuntime(
-        privilegedOperationExecutor, cGroupsHandler);
+        privilegedOperationExecutor);
     dockerLinuxContainerRuntime.initialize(conf);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2661765/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 2b4fc79..c66189d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -20,6 +20,7 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -36,6 +37,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileg
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerModule;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerClient;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerRunCommand;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
@@ -89,9 +91,24 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
   }
 
   public DockerLinuxContainerRuntime(PrivilegedOperationExecutor
+      privilegedOperationExecutor) {
+    this(privilegedOperationExecutor, ResourceHandlerModule
+        .getCGroupsHandler());
+  }
+
+  //A constructor with an injected cGroupsHandler primarily used for testing.
+  @VisibleForTesting
+  public DockerLinuxContainerRuntime(PrivilegedOperationExecutor
       privilegedOperationExecutor, CGroupsHandler cGroupsHandler) {
     this.privilegedOperationExecutor = privilegedOperationExecutor;
-    this.cGroupsHandler = cGroupsHandler;
+
+    if (cGroupsHandler == null) {
+      if (LOG.isInfoEnabled()) {
+        LOG.info("cGroupsHandler is null - cgroups not in use.");
+      }
+    } else {
+      this.cGroupsHandler = cGroupsHandler;
+    }
   }
 
   @Override
@@ -113,6 +130,14 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
   public void addCGroupParentIfRequired(String resourcesOptions,
       String containerIdStr, DockerRunCommand runCommand)
       throws ContainerExecutionException {
+    if (cGroupsHandler == null) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("cGroupsHandler is null. cgroups are not in use. nothing to"
+            + " do.");
+      }
+      return;
+    }
+
     if (resourcesOptions.equals(
         (PrivilegedOperation.CGROUP_ARG_PREFIX + PrivilegedOperation
             .CGROUP_ARG_NO_TASKS))) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2661765/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
index 6898634..e05719c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
@@ -429,5 +429,20 @@ public class TestDockerContainerRuntime {
     //--cgroup-parent should be added for the containerId in question
     String expectedPath = "/" + hierarchy + "/" + containerIdStr;
     Mockito.verify(command).setCGroupParent(expectedPath);
+
+    //create a runtime with a 'null' cgroups handler - i.e no
+    // cgroup-based resource handlers are in use.
+
+    runtime = new DockerLinuxContainerRuntime
+        (mockExecutor, null);
+    runtime.initialize(conf);
+
+    runtime.addCGroupParentIfRequired(resourceOptionsNone, containerIdStr,
+        command);
+    runtime.addCGroupParentIfRequired(resourceOptionsCpu, containerIdStr,
+        command);
+
+    //no --cgroup-parent should be added in either case
+    Mockito.verifyZeroInteractions(command);
   }
 }


[19/34] hadoop git commit: HDFS-9919. TestStandbyCheckpoints#testNonPrimarySBNUploadFSImage waitForCheckpoint incorrectly. Contributed by Lin Yiqun.

Posted by ar...@apache.org.
HDFS-9919. TestStandbyCheckpoints#testNonPrimarySBNUploadFSImage waitForCheckpoint incorrectly. Contributed by Lin Yiqun.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a14a6f08
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a14a6f08
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a14a6f08

Branch: refs/heads/HDFS-1312
Commit: a14a6f08ee9404168affe91affd095e349630971
Parents: 743a99f
Author: Andrew Wang <wa...@apache.org>
Authored: Tue Mar 8 11:51:06 2016 -0800
Committer: Andrew Wang <wa...@apache.org>
Committed: Tue Mar 8 11:54:50 2016 -0800

----------------------------------------------------------------------
 .../hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java     | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a14a6f08/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
index bdeeab5..234bc7b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
@@ -485,7 +485,7 @@ public class TestStandbyCheckpoints {
     for (int i = 0; i < NUM_NNS; i++) {
       // Once the standby catches up, it should do a checkpoint
       // and save to local directories.
-      HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(12));
+      HATestUtil.waitForCheckpoint(cluster, i, ImmutableList.of(12));
     }
 
     cluster.transitionToActive(0);


[07/34] hadoop git commit: HDFS-9521. TransferFsImage.receiveFile should account and log separate times for image download and fsync to disk. Contributed by Wellington Chevreuil

Posted by ar...@apache.org.
HDFS-9521. TransferFsImage.receiveFile should account and log separate times for image download and fsync to disk. Contributed by Wellington Chevreuil


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fd1c09be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fd1c09be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fd1c09be

Branch: refs/heads/HDFS-1312
Commit: fd1c09be3e7c67c188a1dd7e4fccb3d92dcc5b5b
Parents: 8ed2e06
Author: Harsh J <ha...@cloudera.com>
Authored: Mon Mar 7 13:49:47 2016 +0530
Committer: Harsh J <ha...@cloudera.com>
Committed: Mon Mar 7 17:14:51 2016 +0530

----------------------------------------------------------------------
 .../hdfs/server/namenode/TransferFsImage.java   | 33 ++++++++++++++++----
 1 file changed, 27 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd1c09be/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
index eda6303..0186d8b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
@@ -30,6 +30,7 @@ import java.net.URL;
 import java.security.DigestInputStream;
 import java.security.MessageDigest;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -481,6 +482,9 @@ public class TransferFsImage {
       MD5Hash advertisedDigest, String fsImageName, InputStream stream,
       DataTransferThrottler throttler) throws IOException {
     long startTime = Time.monotonicNow();
+    Map<FileOutputStream, File> streamPathMap = new HashMap<>();
+    StringBuilder xferStats = new StringBuilder();
+    double xferCombined = 0;
     if (localPaths != null) {
       // If the local paths refer to directories, use the server-provided header
       // as the filename within that directory
@@ -517,7 +521,9 @@ public class TransferFsImage {
               LOG.warn("Overwriting existing file " + f
                   + " with file downloaded from " + url);
             }
-            outputStreams.add(new FileOutputStream(f));
+            FileOutputStream fos = new FileOutputStream(f);
+            outputStreams.add(fos);
+            streamPathMap.put(fos, f);
           } catch (IOException ioe) {
             LOG.warn("Unable to download file " + f, ioe);
             // This will be null if we're downloading the fsimage to a file
@@ -550,11 +556,26 @@ public class TransferFsImage {
         }
       }
       finishedReceiving = true;
+      double xferSec = Math.max(
+                 ((float)(Time.monotonicNow() - startTime)) / 1000.0, 0.001);
+      long xferKb = received / 1024;
+      xferCombined += xferSec;
+      xferStats.append(
+          String.format(" The fsimage download took %.2fs at %.2f KB/s.",
+              xferSec, xferKb / xferSec));
     } finally {
       stream.close();
       for (FileOutputStream fos : outputStreams) {
+        long flushStartTime = Time.monotonicNow();
         fos.getChannel().force(true);
         fos.close();
+        double writeSec = Math.max(((float)
+               (flushStartTime - Time.monotonicNow())) / 1000.0, 0.001);
+        xferCombined += writeSec;
+        xferStats.append(String
+                .format(" Synchronous (fsync) write to disk of " +
+                 streamPathMap.get(fos).getAbsolutePath() +
+                " took %.2fs.", writeSec));
       }
 
       // Something went wrong and did not finish reading.
@@ -573,11 +594,11 @@ public class TransferFsImage {
                               advertisedSize);
       }
     }
-    double xferSec = Math.max(
-        ((float)(Time.monotonicNow() - startTime)) / 1000.0, 0.001);
-    long xferKb = received / 1024;
-    LOG.info(String.format("Transfer took %.2fs at %.2f KB/s",
-        xferSec, xferKb / xferSec));
+    xferStats.insert(
+        0, String.format(
+            "Combined time for fsimage download and fsync " +
+            "to all disks took %.2fs.", xferCombined));
+    LOG.info(xferStats.toString());
 
     if (digester != null) {
       MD5Hash computedDigest = new MD5Hash(digester.digest());


[26/34] hadoop git commit: HADOOP-12798. Update changelog and release notes (2016-03-04) (aw)

Posted by ar...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.7.2/CHANGES.0.7.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.7.2/CHANGES.0.7.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.7.2/CHANGES.0.7.2.md
index 49b17dc..6c84a2a 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.7.2/CHANGES.0.7.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.7.2/CHANGES.0.7.2.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.7.2/RELEASENOTES.0.7.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.7.2/RELEASENOTES.0.7.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.7.2/RELEASENOTES.0.7.2.md
index 747f6da..a93c6b9 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.7.2/RELEASENOTES.0.7.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.7.2/RELEASENOTES.0.7.2.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.7.2 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.8.0/CHANGES.0.8.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.8.0/CHANGES.0.8.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.8.0/CHANGES.0.8.0.md
index 4e2e9ea..4db1e35 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.8.0/CHANGES.0.8.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.8.0/CHANGES.0.8.0.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.8.0/RELEASENOTES.0.8.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.8.0/RELEASENOTES.0.8.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.8.0/RELEASENOTES.0.8.0.md
index 120f6a5..a0980d4 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.8.0/RELEASENOTES.0.8.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.8.0/RELEASENOTES.0.8.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.8.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.9.0/CHANGES.0.9.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.9.0/CHANGES.0.9.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.9.0/CHANGES.0.9.0.md
index d0247a5..2b0e48c 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.9.0/CHANGES.0.9.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.9.0/CHANGES.0.9.0.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.9.0/RELEASENOTES.0.9.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.9.0/RELEASENOTES.0.9.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.9.0/RELEASENOTES.0.9.0.md
index e031d5b..735385d 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.9.0/RELEASENOTES.0.9.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.9.0/RELEASENOTES.0.9.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.9.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.9.1/CHANGES.0.9.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.9.1/CHANGES.0.9.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.9.1/CHANGES.0.9.1.md
index 925918f..8a3de80 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.9.1/CHANGES.0.9.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.9.1/CHANGES.0.9.1.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.9.1/RELEASENOTES.0.9.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.9.1/RELEASENOTES.0.9.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.9.1/RELEASENOTES.0.9.1.md
index fad93ae..d3c2c3b 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.9.1/RELEASENOTES.0.9.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.9.1/RELEASENOTES.0.9.1.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.9.1 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.9.2/CHANGES.0.9.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.9.2/CHANGES.0.9.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.9.2/CHANGES.0.9.2.md
index e248210..ab1426f 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.9.2/CHANGES.0.9.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.9.2/CHANGES.0.9.2.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.9.2/RELEASENOTES.0.9.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.9.2/RELEASENOTES.0.9.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.9.2/RELEASENOTES.0.9.2.md
index 5e91e58..0d86cbf 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.9.2/RELEASENOTES.0.9.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.9.2/RELEASENOTES.0.9.2.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.9.2 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.0/CHANGES.1.0.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.0/CHANGES.1.0.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.0/CHANGES.1.0.0.md
index 3b35420..eb9a382 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.0/CHANGES.1.0.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.0/CHANGES.1.0.0.md
@@ -27,6 +27,12 @@
 | [HDFS-617](https://issues.apache.org/jira/browse/HDFS-617) | Support for non-recursive create() in HDFS |  Major | hdfs-client, namenode | Kan Zhang | Kan Zhang |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.0/RELEASENOTES.1.0.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.0/RELEASENOTES.1.0.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.0/RELEASENOTES.1.0.0.md
index 0ec8c49..1aa12cb 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.0/RELEASENOTES.1.0.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.0/RELEASENOTES.1.0.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  1.0.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.1/CHANGES.1.0.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.1/CHANGES.1.0.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.1/CHANGES.1.0.1.md
index 161abc0..912958b 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.1/CHANGES.1.0.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.1/CHANGES.1.0.1.md
@@ -28,6 +28,12 @@
 | [HADOOP-7470](https://issues.apache.org/jira/browse/HADOOP-7470) | move up to Jackson 1.8.8 |  Minor | util | Steve Loughran | Enis Soztutar |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.1/RELEASENOTES.1.0.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.1/RELEASENOTES.1.0.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.1/RELEASENOTES.1.0.1.md
index 1745f2b..01a464c 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.1/RELEASENOTES.1.0.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.1/RELEASENOTES.1.0.1.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  1.0.1 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.2/CHANGES.1.0.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.2/CHANGES.1.0.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.2/CHANGES.1.0.2.md
index 7c2cfab..7854534 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.2/CHANGES.1.0.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.2/CHANGES.1.0.2.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.2/RELEASENOTES.1.0.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.2/RELEASENOTES.1.0.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.2/RELEASENOTES.1.0.2.md
index 5352f62..d09f290 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.2/RELEASENOTES.1.0.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.2/RELEASENOTES.1.0.2.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  1.0.2 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.3/CHANGES.1.0.3.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.3/CHANGES.1.0.3.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.3/CHANGES.1.0.3.md
index f12cd21..49b0172 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.3/CHANGES.1.0.3.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.3/CHANGES.1.0.3.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.3/RELEASENOTES.1.0.3.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.3/RELEASENOTES.1.0.3.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.3/RELEASENOTES.1.0.3.md
index 7652dbf..e94bbf9 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.3/RELEASENOTES.1.0.3.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.3/RELEASENOTES.1.0.3.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  1.0.3 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.4/CHANGES.1.0.4.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.4/CHANGES.1.0.4.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.4/CHANGES.1.0.4.md
index 6325e46..bcda2d0 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.4/CHANGES.1.0.4.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.4/CHANGES.1.0.4.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.4/RELEASENOTES.1.0.4.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.4/RELEASENOTES.1.0.4.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.4/RELEASENOTES.1.0.4.md
index fa14781..aaab663 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.4/RELEASENOTES.1.0.4.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.0.4/RELEASENOTES.1.0.4.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  1.0.4 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.0/CHANGES.1.1.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.0/CHANGES.1.1.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.0/CHANGES.1.1.0.md
index c4caca7..a475bb3 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.0/CHANGES.1.1.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.0/CHANGES.1.1.0.md
@@ -35,6 +35,12 @@
 | [HDFS-2617](https://issues.apache.org/jira/browse/HDFS-2617) | Replaced Kerberized SSL for image transfer and fsck with SPNEGO-based solution |  Major | security | Jakob Homan | Jakob Homan |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.0/RELEASENOTES.1.1.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.0/RELEASENOTES.1.1.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.0/RELEASENOTES.1.1.0.md
index 6caadbf..53c7939 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.0/RELEASENOTES.1.1.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.0/RELEASENOTES.1.1.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  1.1.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---
@@ -60,7 +60,7 @@ Append is not supported in Hadoop 1.x. Please upgrade to 2.x if you need append.
 
 * [HADOOP-6995](https://issues.apache.org/jira/browse/HADOOP-6995) | *Minor* | **Allow wildcards to be used in ProxyUsers configurations**
 
-When configuring proxy users and hosts, the special wildcard value "*" may be specified to match any host or any user.
+When configuring proxy users and hosts, the special wildcard value "\*" may be specified to match any host or any user.
 
 
 ---
@@ -87,7 +87,7 @@ Please see hdfs-default.xml for detailed description.
 
 This jira adds a new DataNode state called "stale" at the NameNode. DataNodes are marked as stale if it does not send heartbeat message to NameNode within the timeout configured using the configuration parameter "dfs.namenode.stale.datanode.interval" in seconds (default value is 30 seconds). NameNode picks a stale datanode as the last target to read from when returning block locations for reads.
 
-This feature is by default turned * off *. To turn on the feature, set the HDFS configuration "dfs.namenode.check.stale.datanode" to true.
+This feature is by default turned \* off \*. To turn on the feature, set the HDFS configuration "dfs.namenode.check.stale.datanode" to true.
 
 
 ---
@@ -182,7 +182,7 @@ Fixes the issue of GenerateDistCacheData  job slowness.
 
 * [MAPREDUCE-3597](https://issues.apache.org/jira/browse/MAPREDUCE-3597) | *Major* | **Provide a way to access other info of history file from Rumentool**
 
-Rumen now provides {{Parsed*}} objects. These objects provide extra information that are not provided by {{Logged*}} objects.
+Rumen now provides {{Parsed\*}} objects. These objects provide extra information that are not provided by {{Logged\*}} objects.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.1/CHANGES.1.1.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.1/CHANGES.1.1.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.1/CHANGES.1.1.1.md
index f2685d4..315a43a 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.1/CHANGES.1.1.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.1/CHANGES.1.1.1.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.1/RELEASENOTES.1.1.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.1/RELEASENOTES.1.1.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.1/RELEASENOTES.1.1.1.md
index c1523c3..2f0a88a 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.1/RELEASENOTES.1.1.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.1/RELEASENOTES.1.1.1.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  1.1.1 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.2/CHANGES.1.1.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.2/CHANGES.1.1.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.2/CHANGES.1.1.2.md
index ab774fb..100f23a 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.2/CHANGES.1.1.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.2/CHANGES.1.1.2.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.2/RELEASENOTES.1.1.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.2/RELEASENOTES.1.1.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.2/RELEASENOTES.1.1.2.md
index 0570d73..b55f17d 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.2/RELEASENOTES.1.1.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.2/RELEASENOTES.1.1.2.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  1.1.2 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.3/CHANGES.1.1.3.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.3/CHANGES.1.1.3.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.3/CHANGES.1.1.3.md
index 8765b23..86aeb5d 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.3/CHANGES.1.1.3.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.3/CHANGES.1.1.3.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop Changelog
 
-## Release 1.1.3 - Unreleased
+## Release 1.1.3 - Unreleased (as of 2016-03-04)
 
 ### INCOMPATIBLE CHANGES:
 
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.3/RELEASENOTES.1.1.3.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.3/RELEASENOTES.1.1.3.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.3/RELEASENOTES.1.1.3.md
index dfce0e8..1e229d3 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.3/RELEASENOTES.1.1.3.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.1.3/RELEASENOTES.1.1.3.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  1.1.3 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/1.2.0/CHANGES.1.2.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.2.0/CHANGES.1.2.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.2.0/CHANGES.1.2.0.md
index 6c82ffb..ceb86d0 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.2.0/CHANGES.1.2.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.2.0/CHANGES.1.2.0.md
@@ -31,6 +31,12 @@
 | [MAPREDUCE-4629](https://issues.apache.org/jira/browse/MAPREDUCE-4629) | Remove JobHistory.DEBUG\_MODE |  Major | . | Karthik Kambatla | Karthik Kambatla |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/1.2.0/RELEASENOTES.1.2.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.2.0/RELEASENOTES.1.2.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.2.0/RELEASENOTES.1.2.0.md
index 9f1e705..3fa573c 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.2.0/RELEASENOTES.1.2.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.2.0/RELEASENOTES.1.2.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  1.2.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---
@@ -46,7 +46,7 @@ This patch should be checked in together (or after) with JIRA Hadoop-8469: https
 
 * [HADOOP-8164](https://issues.apache.org/jira/browse/HADOOP-8164) | *Major* | **Handle paths using back slash as path separator for windows only**
 
-This jira only allows providing paths using back slash as separator on Windows. The back slash on *nix system will be used as escape character. The support for paths using back slash as path separator will be removed in HADOOP-8139 in release 23.3.
+This jira only allows providing paths using back slash as separator on Windows. The back slash on \*nix system will be used as escape character. The support for paths using back slash as path separator will be removed in HADOOP-8139 in release 23.3.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/1.2.1/CHANGES.1.2.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.2.1/CHANGES.1.2.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.2.1/CHANGES.1.2.1.md
index 84a10aa..603e235 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.2.1/CHANGES.1.2.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.2.1/CHANGES.1.2.1.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/1.2.1/RELEASENOTES.1.2.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.2.1/RELEASENOTES.1.2.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.2.1/RELEASENOTES.1.2.1.md
index 8835bae..162d153 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.2.1/RELEASENOTES.1.2.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.2.1/RELEASENOTES.1.2.1.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  1.2.1 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/1.2.2/CHANGES.1.2.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.2.2/CHANGES.1.2.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.2.2/CHANGES.1.2.2.md
index 936cd81..08cf273 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.2.2/CHANGES.1.2.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.2.2/CHANGES.1.2.2.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop Changelog
 
-## Release 1.2.2 - Unreleased
+## Release 1.2.2 - Unreleased (as of 2016-03-04)
 
 ### INCOMPATIBLE CHANGES:
 
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/1.2.2/RELEASENOTES.1.2.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.2.2/RELEASENOTES.1.2.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.2.2/RELEASENOTES.1.2.2.md
index 9ecfb2d..55a7f6c 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.2.2/RELEASENOTES.1.2.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.2.2/RELEASENOTES.1.2.2.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  1.2.2 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/1.3.0/CHANGES.1.3.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.3.0/CHANGES.1.3.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.3.0/CHANGES.1.3.0.md
index 04f639f..1a12646 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.3.0/CHANGES.1.3.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.3.0/CHANGES.1.3.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop Changelog
 
-## Release 1.3.0 - Unreleased
+## Release 1.3.0 - Unreleased (as of 2016-03-04)
 
 ### INCOMPATIBLE CHANGES:
 
@@ -27,6 +27,12 @@
 | [MAPREDUCE-5777](https://issues.apache.org/jira/browse/MAPREDUCE-5777) | Support utf-8 text with BOM (byte order marker) |  Major | . | bc Wong | zhihai xu |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/1.3.0/RELEASENOTES.1.3.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.3.0/RELEASENOTES.1.3.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.3.0/RELEASENOTES.1.3.0.md
index 7680068..a6cf8f8 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/1.3.0/RELEASENOTES.1.3.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/1.3.0/RELEASENOTES.1.3.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  1.3.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.0-alpha/CHANGES.2.0.0-alpha.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.0-alpha/CHANGES.2.0.0-alpha.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.0-alpha/CHANGES.2.0.0-alpha.md
index 52aa432..13179e4 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.0-alpha/CHANGES.2.0.0-alpha.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.0-alpha/CHANGES.2.0.0-alpha.md
@@ -40,6 +40,12 @@
 | [HDFS-395](https://issues.apache.org/jira/browse/HDFS-395) | DFS Scalability: Incremental block reports |  Major | datanode, namenode | dhruba borthakur | Tomasz Nykiel |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.0-alpha/RELEASENOTES.2.0.0-alpha.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.0-alpha/RELEASENOTES.2.0.0-alpha.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.0-alpha/RELEASENOTES.2.0.0-alpha.md
index e013b64..06edd6d 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.0-alpha/RELEASENOTES.2.0.0-alpha.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.0-alpha/RELEASENOTES.2.0.0-alpha.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  2.0.0-alpha Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.1-alpha/CHANGES.2.0.1-alpha.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.1-alpha/CHANGES.2.0.1-alpha.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.1-alpha/CHANGES.2.0.1-alpha.md
index acd6e8f..4f2d9ff 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.1-alpha/CHANGES.2.0.1-alpha.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.1-alpha/CHANGES.2.0.1-alpha.md
@@ -27,6 +27,12 @@
 | [HADOOP-8552](https://issues.apache.org/jira/browse/HADOOP-8552) | Conflict: Same security.log.file for multiple users. |  Major | conf, security | Karthik Kambatla | Karthik Kambatla |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.1-alpha/RELEASENOTES.2.0.1-alpha.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.1-alpha/RELEASENOTES.2.0.1-alpha.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.1-alpha/RELEASENOTES.2.0.1-alpha.md
index ca57363..fc34b58 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.1-alpha/RELEASENOTES.2.0.1-alpha.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.1-alpha/RELEASENOTES.2.0.1-alpha.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  2.0.1-alpha Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.2-alpha/CHANGES.2.0.2-alpha.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.2-alpha/CHANGES.2.0.2-alpha.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.2-alpha/CHANGES.2.0.2-alpha.md
index a9bbab8..1d032c7 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.2-alpha/CHANGES.2.0.2-alpha.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.2-alpha/CHANGES.2.0.2-alpha.md
@@ -44,6 +44,12 @@
 | [MAPREDUCE-3812](https://issues.apache.org/jira/browse/MAPREDUCE-3812) | Lower default allocation sizes, fix allocation configurations and document them |  Major | mrv2, performance | Vinod Kumar Vavilapalli | Harsh J |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.2-alpha/RELEASENOTES.2.0.2-alpha.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.2-alpha/RELEASENOTES.2.0.2-alpha.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.2-alpha/RELEASENOTES.2.0.2-alpha.md
index 91d0ddc..7073417 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.2-alpha/RELEASENOTES.2.0.2-alpha.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.2-alpha/RELEASENOTES.2.0.2-alpha.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  2.0.2-alpha Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.3-alpha/CHANGES.2.0.3-alpha.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.3-alpha/CHANGES.2.0.3-alpha.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.3-alpha/CHANGES.2.0.3-alpha.md
index 8843795..6ec5fbb 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.3-alpha/CHANGES.2.0.3-alpha.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.3-alpha/CHANGES.2.0.3-alpha.md
@@ -39,6 +39,12 @@
 | [MAPREDUCE-4123](https://issues.apache.org/jira/browse/MAPREDUCE-4123) | ./mapred groups gives NoClassDefFoundError |  Critical | mrv2 | Nishan Shetty | Devaraj K |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.3-alpha/RELEASENOTES.2.0.3-alpha.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.3-alpha/RELEASENOTES.2.0.3-alpha.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.3-alpha/RELEASENOTES.2.0.3-alpha.md
index e8aaca6..f924b91 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.3-alpha/RELEASENOTES.2.0.3-alpha.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.3-alpha/RELEASENOTES.2.0.3-alpha.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  2.0.3-alpha Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---
@@ -167,7 +167,7 @@ Please see hdfs-default.xml documentation corresponding to ""dfs.namenode.stale.
 
 This jira adds a new DataNode state called "stale" at the NameNode. DataNodes are marked as stale if it does not send heartbeat message to NameNode within the timeout configured using the configuration parameter "dfs.namenode.stale.datanode.interval" in seconds (default value is 30 seconds). NameNode picks a stale datanode as the last target to read from when returning block locations for reads.
 
-This feature is by default turned * off *. To turn on the feature, set the HDFS configuration "dfs.namenode.check.stale.datanode" to true.
+This feature is by default turned \* off \*. To turn on the feature, set the HDFS configuration "dfs.namenode.check.stale.datanode" to true.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.4-alpha/CHANGES.2.0.4-alpha.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.4-alpha/CHANGES.2.0.4-alpha.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.4-alpha/CHANGES.2.0.4-alpha.md
index ff53af9..0f1db1e 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.4-alpha/CHANGES.2.0.4-alpha.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.4-alpha/CHANGES.2.0.4-alpha.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.4-alpha/RELEASENOTES.2.0.4-alpha.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.4-alpha/RELEASENOTES.2.0.4-alpha.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.4-alpha/RELEASENOTES.2.0.4-alpha.md
index 453d2a1..988dbe1 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.4-alpha/RELEASENOTES.2.0.4-alpha.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.4-alpha/RELEASENOTES.2.0.4-alpha.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  2.0.4-alpha Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.5-alpha/CHANGES.2.0.5-alpha.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.5-alpha/CHANGES.2.0.5-alpha.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.5-alpha/CHANGES.2.0.5-alpha.md
index 09e191b..2aab255 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.5-alpha/CHANGES.2.0.5-alpha.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.5-alpha/CHANGES.2.0.5-alpha.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.5-alpha/RELEASENOTES.2.0.5-alpha.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.5-alpha/RELEASENOTES.2.0.5-alpha.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.5-alpha/RELEASENOTES.2.0.5-alpha.md
index 5ba1cbc..9ad4b51 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.5-alpha/RELEASENOTES.2.0.5-alpha.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.5-alpha/RELEASENOTES.2.0.5-alpha.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  2.0.5-alpha Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.6-alpha/CHANGES.2.0.6-alpha.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.6-alpha/CHANGES.2.0.6-alpha.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.6-alpha/CHANGES.2.0.6-alpha.md
index 5047be8..2383d4a 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.6-alpha/CHANGES.2.0.6-alpha.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.6-alpha/CHANGES.2.0.6-alpha.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.6-alpha/RELEASENOTES.2.0.6-alpha.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.6-alpha/RELEASENOTES.2.0.6-alpha.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.6-alpha/RELEASENOTES.2.0.6-alpha.md
index 6ecfaa9..8bb549f 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.6-alpha/RELEASENOTES.2.0.6-alpha.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.0.6-alpha/RELEASENOTES.2.0.6-alpha.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  2.0.6-alpha Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.1.0-beta/CHANGES.2.1.0-beta.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.1.0-beta/CHANGES.2.1.0-beta.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.1.0-beta/CHANGES.2.1.0-beta.md
index 13c97f8..192229f 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.1.0-beta/CHANGES.2.1.0-beta.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.1.0-beta/CHANGES.2.1.0-beta.md
@@ -109,6 +109,12 @@
 | [YARN-387](https://issues.apache.org/jira/browse/YARN-387) | Fix inconsistent protocol naming |  Blocker | . | Vinod Kumar Vavilapalli | Vinod Kumar Vavilapalli |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.1.0-beta/RELEASENOTES.2.1.0-beta.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.1.0-beta/RELEASENOTES.2.1.0-beta.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.1.0-beta/RELEASENOTES.2.1.0-beta.md
index 0dbd6bd..639afd9 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.1.0-beta/RELEASENOTES.2.1.0-beta.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.1.0-beta/RELEASENOTES.2.1.0-beta.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  2.1.0-beta Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.1.1-beta/CHANGES.2.1.1-beta.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.1.1-beta/CHANGES.2.1.1-beta.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.1.1-beta/CHANGES.2.1.1-beta.md
index 245bdd4..1e7747e 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.1.1-beta/CHANGES.2.1.1-beta.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.1.1-beta/CHANGES.2.1.1-beta.md
@@ -29,6 +29,12 @@
 | [YARN-707](https://issues.apache.org/jira/browse/YARN-707) | Add user info in the YARN ClientToken |  Blocker | . | Bikas Saha | Jason Lowe |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.1.1-beta/RELEASENOTES.2.1.1-beta.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.1.1-beta/RELEASENOTES.2.1.1-beta.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.1.1-beta/RELEASENOTES.2.1.1-beta.md
index db2f170..c1dd568 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.1.1-beta/RELEASENOTES.2.1.1-beta.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.1.1-beta/RELEASENOTES.2.1.1-beta.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  2.1.1-beta Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.2.0/CHANGES.2.2.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.2.0/CHANGES.2.2.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.2.0/CHANGES.2.2.0.md
index e70a79a..e47ecd8 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.2.0/CHANGES.2.2.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.2.0/CHANGES.2.2.0.md
@@ -29,6 +29,12 @@
 | [YARN-1228](https://issues.apache.org/jira/browse/YARN-1228) | Clean up Fair Scheduler configuration loading |  Major | scheduler | Sandy Ryza | Sandy Ryza |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.2.0/RELEASENOTES.2.2.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.2.0/RELEASENOTES.2.2.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.2.0/RELEASENOTES.2.2.0.md
index 301e0c6..5132927 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.2.0/RELEASENOTES.2.2.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.2.0/RELEASENOTES.2.2.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  2.2.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.2.1/CHANGES.2.2.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.2.1/CHANGES.2.2.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.2.1/CHANGES.2.2.1.md
index 1f7eb2c..4584486 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.2.1/CHANGES.2.2.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.2.1/CHANGES.2.2.1.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop Changelog
 
-## Release 2.2.1 - Unreleased
+## Release 2.2.1 - Unreleased (as of 2016-03-04)
 
 ### INCOMPATIBLE CHANGES:
 
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.2.1/RELEASENOTES.2.2.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.2.1/RELEASENOTES.2.2.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.2.1/RELEASENOTES.2.2.1.md
index b4d1b73..ae5b29c 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.2.1/RELEASENOTES.2.2.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.2.1/RELEASENOTES.2.2.1.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  2.2.1 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.3.0/CHANGES.2.3.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.3.0/CHANGES.2.3.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.3.0/CHANGES.2.3.0.md
index bf50e32..71d6e77 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.3.0/CHANGES.2.3.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.3.0/CHANGES.2.3.0.md
@@ -27,6 +27,12 @@
 | [HDFS-4997](https://issues.apache.org/jira/browse/HDFS-4997) | libhdfs doesn't return correct error codes in most cases |  Major | libhdfs | Colin Patrick McCabe | Colin Patrick McCabe |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |
@@ -34,7 +40,7 @@
 | [HADOOP-10047](https://issues.apache.org/jira/browse/HADOOP-10047) | Add a directbuffer Decompressor API to hadoop |  Major | io | Gopal V | Gopal V |
 | [HADOOP-9848](https://issues.apache.org/jira/browse/HADOOP-9848) | Create a MiniKDC for use with security testing |  Major | security, test | Wei Yan | Wei Yan |
 | [HADOOP-9618](https://issues.apache.org/jira/browse/HADOOP-9618) | Add thread which detects JVM pauses |  Major | util | Todd Lipcon | Todd Lipcon |
-| [HADOOP-9432](https://issues.apache.org/jira/browse/HADOOP-9432) | Add support for markdown .md files in site documentation |  Minor | build, documentation | Steve Loughran |  |
+| [HADOOP-9432](https://issues.apache.org/jira/browse/HADOOP-9432) | Add support for markdown .md files in site documentation |  Minor | build, documentation | Steve Loughran | Steve Loughran |
 | [HADOOP-8545](https://issues.apache.org/jira/browse/HADOOP-8545) | Filesystem Implementation for OpenStack Swift |  Major | fs | Tim Miller | Dmitry Mezhensky |
 | [HDFS-5703](https://issues.apache.org/jira/browse/HDFS-5703) | Add support for HTTPS and swebhdfs to HttpFS |  Major | webhdfs | Alejandro Abdelnur | Alejandro Abdelnur |
 | [HDFS-5260](https://issues.apache.org/jira/browse/HDFS-5260) | Merge zero-copy memory-mapped HDFS client reads to trunk and branch-2. |  Major | hdfs-client, libhdfs | Chris Nauroth | Chris Nauroth |
@@ -428,6 +434,7 @@
 | [YARN-1454](https://issues.apache.org/jira/browse/YARN-1454) | TestRMRestart.testRMDelegationTokenRestoredOnRMRestart is failing intermittently |  Critical | . | Jian He | Karthik Kambatla |
 | [YARN-1451](https://issues.apache.org/jira/browse/YARN-1451) | TestResourceManager relies on the scheduler assigning multiple containers in a single node update |  Minor | . | Sandy Ryza | Sandy Ryza |
 | [YARN-1450](https://issues.apache.org/jira/browse/YARN-1450) | TestUnmanagedAMLauncher#testDSShell fails on trunk |  Major | applications/distributed-shell | Akira AJISAKA | Binglin Chang |
+| [YARN-1438](https://issues.apache.org/jira/browse/YARN-1438) | When a container fails, the text of the exception isn't included in the diagnostics |  Major | nodemanager | Steve Loughran | Steve Loughran |
 | [YARN-1435](https://issues.apache.org/jira/browse/YARN-1435) | Distributed Shell should not run other commands except "sh", and run the custom script at the same time. |  Major | applications/distributed-shell | Tassapol Athiapinya | Xuan Gong |
 | [YARN-1425](https://issues.apache.org/jira/browse/YARN-1425) | TestRMRestart fails because MockRM.waitForState(AttemptId) uses current attempt instead of the attempt passed as argument |  Major | . | Omkar Vinit Joshi | Omkar Vinit Joshi |
 | [YARN-1419](https://issues.apache.org/jira/browse/YARN-1419) | TestFifoScheduler.testAppAttemptMetrics fails intermittently under jdk7 |  Minor | scheduler | Jonathan Eagles | Jonathan Eagles |
@@ -648,8 +655,8 @@
 | [YARN-427](https://issues.apache.org/jira/browse/YARN-427) | Coverage fix for org.apache.hadoop.yarn.server.api.\* |  Major | . | Aleksey Gorshkov | Aleksey Gorshkov |
 | [YARN-425](https://issues.apache.org/jira/browse/YARN-425) | coverage fix for yarn api |  Major | . | Aleksey Gorshkov | Aleksey Gorshkov |
 | [YARN-353](https://issues.apache.org/jira/browse/YARN-353) | Add Zookeeper-based store implementation for RMStateStore |  Major | resourcemanager | Hitesh Shah | Karthik Kambatla |
-| [YARN-312](https://issues.apache.org/jira/browse/YARN-312) | Add updateNodeResource in ResourceManagerAdministrationProtocol |  Major | api | Junping Du | Junping Du |
-| [YARN-311](https://issues.apache.org/jira/browse/YARN-311) | Dynamic node resource configuration: core scheduler changes |  Major | resourcemanager, scheduler | Junping Du | Junping Du |
+| [YARN-312](https://issues.apache.org/jira/browse/YARN-312) | Add updateNodeResource in ResourceManagerAdministrationProtocol |  Major | api, graceful | Junping Du | Junping Du |
+| [YARN-311](https://issues.apache.org/jira/browse/YARN-311) | Dynamic node resource configuration: core scheduler changes |  Major | graceful, resourcemanager, scheduler | Junping Du | Junping Du |
 | [YARN-7](https://issues.apache.org/jira/browse/YARN-7) | Add support for DistributedShell to ask for CPUs along with memory |  Major | . | Arun C Murthy | Junping Du |
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.3.0/RELEASENOTES.2.3.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.3.0/RELEASENOTES.2.3.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.3.0/RELEASENOTES.2.3.0.md
index 103cf96..43dc922 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.3.0/RELEASENOTES.2.3.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.3.0/RELEASENOTES.2.3.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  2.3.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---
@@ -39,6 +39,7 @@ The 'du' (disk usage command from Unix) script refresh monitor is now configurab
 
 * [HADOOP-8545](https://issues.apache.org/jira/browse/HADOOP-8545) | *Major* | **Filesystem Implementation for OpenStack Swift**
 
+<!-- markdown -->
 Added file system implementation for OpenStack Swift.
There are two implementations: block and native (similar to Amazon S3 integration).
 Data locality issue solved by patch in Swift, commit procedure to OpenStack is in progress.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.4.0/CHANGES.2.4.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.4.0/CHANGES.2.4.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.4.0/CHANGES.2.4.0.md
index 78bc092..06e9c9b 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.4.0/CHANGES.2.4.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.4.0/CHANGES.2.4.0.md
@@ -33,6 +33,12 @@
 | [MAPREDUCE-5036](https://issues.apache.org/jira/browse/MAPREDUCE-5036) | Default shuffle handler port should not be 8080 |  Major | . | Sandy Ryza | Sandy Ryza |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |
@@ -392,6 +398,7 @@
 | [YARN-1719](https://issues.apache.org/jira/browse/YARN-1719) | ATSWebServices produces jersey warnings |  Major | . | Billie Rinaldi | Billie Rinaldi |
 | [YARN-1717](https://issues.apache.org/jira/browse/YARN-1717) | Enable offline deletion of entries in leveldb timeline store |  Major | . | Billie Rinaldi | Billie Rinaldi |
 | [YARN-1706](https://issues.apache.org/jira/browse/YARN-1706) | Create an utility function to dump timeline records to json |  Major | . | Zhijie Shen | Zhijie Shen |
+| [YARN-1705](https://issues.apache.org/jira/browse/YARN-1705) | Reset cluster-metrics on transition to standby |  Major | resourcemanager | Karthik Kambatla | Rohith Sharma K S |
 | [YARN-1704](https://issues.apache.org/jira/browse/YARN-1704) | Review LICENSE and NOTICE to reflect new levelDB related libraries being used |  Blocker | . | Billie Rinaldi | Billie Rinaldi |
 | [YARN-1698](https://issues.apache.org/jira/browse/YARN-1698) | Replace MemoryApplicationTimelineStore with LeveldbApplicationTimelineStore as default |  Major | . | Zhijie Shen | Zhijie Shen |
 | [YARN-1690](https://issues.apache.org/jira/browse/YARN-1690) | Sending timeline entities+events from Distributed shell |  Major | . | Mayank Bansal | Mayank Bansal |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.4.0/RELEASENOTES.2.4.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.4.0/RELEASENOTES.2.4.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.4.0/RELEASENOTES.2.4.0.md
index 63fbfe4a..a86f1e0 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.4.0/RELEASENOTES.2.4.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.4.0/RELEASENOTES.2.4.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  2.4.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.4.1/CHANGES.2.4.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.4.1/CHANGES.2.4.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.4.1/CHANGES.2.4.1.md
index 48d9480..9ad1697 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.4.1/CHANGES.2.4.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.4.1/CHANGES.2.4.1.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.4.1/RELEASENOTES.2.4.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.4.1/RELEASENOTES.2.4.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.4.1/RELEASENOTES.2.4.1.md
index 0425a58..ad1b358 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.4.1/RELEASENOTES.2.4.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.4.1/RELEASENOTES.2.4.1.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  2.4.1 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.5.0/CHANGES.2.5.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.5.0/CHANGES.2.5.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.5.0/CHANGES.2.5.0.md
index 82f8878..f62f96b 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.5.0/CHANGES.2.5.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.5.0/CHANGES.2.5.0.md
@@ -31,6 +31,12 @@
 | [YARN-2107](https://issues.apache.org/jira/browse/YARN-2107) | Refactor timeline classes into server.timeline package |  Major | . | Vinod Kumar Vavilapalli | Vinod Kumar Vavilapalli |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |
@@ -467,7 +473,7 @@
 | [HADOOP-10426](https://issues.apache.org/jira/browse/HADOOP-10426) | CreateOpts.getOpt(..) should declare with generic type argument |  Minor | fs | Tsz Wo Nicholas Sze | Tsz Wo Nicholas Sze |
 | [HADOOP-10279](https://issues.apache.org/jira/browse/HADOOP-10279) | Create multiplexer, a requirement for the fair queue |  Major | . | Chris Li | Chris Li |
 | [HADOOP-10104](https://issues.apache.org/jira/browse/HADOOP-10104) | Update jackson to 1.9.13 |  Minor | build | Steve Loughran | Akira AJISAKA |
-| [HADOOP-9712](https://issues.apache.org/jira/browse/HADOOP-9712) | Write contract tests for FTP filesystem, fix places where it breaks |  Minor | fs/s3 | Steve Loughran |  |
+| [HADOOP-9712](https://issues.apache.org/jira/browse/HADOOP-9712) | Write contract tests for FTP filesystem, fix places where it breaks |  Minor | fs/s3 | Steve Loughran | Steve Loughran |
 | [HADOOP-9711](https://issues.apache.org/jira/browse/HADOOP-9711) | Write contract tests for S3Native; fix places where it breaks |  Minor | fs/s3 | Steve Loughran | Steve Loughran |
 | [HADOOP-9371](https://issues.apache.org/jira/browse/HADOOP-9371) | Define Semantics of FileSystem more rigorously |  Major | fs | Steve Loughran | Steve Loughran |
 | [HDFS-6562](https://issues.apache.org/jira/browse/HDFS-6562) | Refactor rename() in FSDirectory |  Minor | namenode | Haohui Mai | Haohui Mai |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.5.0/RELEASENOTES.2.5.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.5.0/RELEASENOTES.2.5.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.5.0/RELEASENOTES.2.5.0.md
index 6f24358..43fb3fa 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.5.0/RELEASENOTES.2.5.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.5.0/RELEASENOTES.2.5.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  2.5.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---
@@ -59,20 +59,6 @@ Remove MRv1 settings from hadoop-metrics2.properties, add YARN settings instead.
 
 ---
 
-* [HDFS-6471](https://issues.apache.org/jira/browse/HDFS-6471) | *Major* | **Make moveFromLocal CLI testcases to be non-disruptive**
-
-Committed to trunk and merged into branch-2. Thanks Dasha!
-
-
----
-
-* [HDFS-6297](https://issues.apache.org/jira/browse/HDFS-6297) | *Major* | **Add CLI testcases to reflect new features of dfs and dfsadmin**
-
-Committed to the trunk and branch-2. Thanks Dasha!
-
-
----
-
 * [HDFS-6293](https://issues.apache.org/jira/browse/HDFS-6293) | *Blocker* | **Issues with OIV processing PB-based fsimages**
 
 Set "dfs.namenode.legacy-oiv-image.dir" to an appropriate directory to make standby name node or secondary name node save its file system state in the old fsimage format during checkpointing. This image can be used for offline analysis using the OfflineImageViewer.  Use the "hdfs oiv\_legacy" command to process the old fsimage format.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.5.1/CHANGES.2.5.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.5.1/CHANGES.2.5.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.5.1/CHANGES.2.5.1.md
index 9c8e4b7..ee53649 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.5.1/CHANGES.2.5.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.5.1/CHANGES.2.5.1.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.5.1/RELEASENOTES.2.5.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.5.1/RELEASENOTES.2.5.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.5.1/RELEASENOTES.2.5.1.md
index ea5e750..1eb0b2a 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.5.1/RELEASENOTES.2.5.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.5.1/RELEASENOTES.2.5.1.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  2.5.1 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.5.2/CHANGES.2.5.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.5.2/CHANGES.2.5.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.5.2/CHANGES.2.5.2.md
index 3406359..426a107 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.5.2/CHANGES.2.5.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.5.2/CHANGES.2.5.2.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.5.2/RELEASENOTES.2.5.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.5.2/RELEASENOTES.2.5.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.5.2/RELEASENOTES.2.5.2.md
index 7348c52..4734acf 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.5.2/RELEASENOTES.2.5.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.5.2/RELEASENOTES.2.5.2.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  2.5.2 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.0/CHANGES.2.6.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.0/CHANGES.2.6.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.0/CHANGES.2.6.0.md
index 85098fe..c4ce70e 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.0/CHANGES.2.6.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.0/CHANGES.2.6.0.md
@@ -30,6 +30,12 @@
 | [YARN-668](https://issues.apache.org/jira/browse/YARN-668) | TokenIdentifier serialization should consider Unknown fields |  Blocker | . | Siddharth Seth | Junping Du |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |
@@ -120,7 +126,6 @@
 | [HADOOP-10696](https://issues.apache.org/jira/browse/HADOOP-10696) | Add optional attributes to KeyProvider Options and Metadata |  Major | security | Alejandro Abdelnur | Alejandro Abdelnur |
 | [HADOOP-10695](https://issues.apache.org/jira/browse/HADOOP-10695) | KMSClientProvider should respect a configurable timeout. |  Major | . | Andrew Wang | Mike Yoder |
 | [HADOOP-10675](https://issues.apache.org/jira/browse/HADOOP-10675) | Add server-side encryption functionality to s3a |  Major | fs/s3 | David S. Wang | David S. Wang |
-| [HADOOP-10620](https://issues.apache.org/jira/browse/HADOOP-10620) | /docs/current doesn't point to the latest version 2.4.0 |  Major | documentation | Jacek Laskowski |  |
 | [HADOOP-10610](https://issues.apache.org/jira/browse/HADOOP-10610) | Upgrade S3n fs.s3.buffer.dir to support multi directories |  Minor | fs/s3 | Ted Malaska | Ted Malaska |
 | [HADOOP-10433](https://issues.apache.org/jira/browse/HADOOP-10433) | Key Management Server based on KeyProvider API |  Major | security | Alejandro Abdelnur | Alejandro Abdelnur |
 | [HADOOP-10432](https://issues.apache.org/jira/browse/HADOOP-10432) | Refactor SSLFactory to expose static method to determine HostnameVerifier |  Major | security | Alejandro Abdelnur | Alejandro Abdelnur |
@@ -132,7 +137,7 @@
 | [HADOOP-10373](https://issues.apache.org/jira/browse/HADOOP-10373) | create tools/hadoop-amazon for aws/EMR support |  Major | fs/s3 | Steve Loughran | Steve Loughran |
 | [HADOOP-10335](https://issues.apache.org/jira/browse/HADOOP-10335) | An ip whilelist based implementation to resolve Sasl properties per connection |  Major | . | Benoy Antony | Benoy Antony |
 | [HADOOP-10231](https://issues.apache.org/jira/browse/HADOOP-10231) | Add some components in Native Libraries document |  Minor | documentation | Akira AJISAKA | Akira AJISAKA |
-| [HADOOP-9540](https://issues.apache.org/jira/browse/HADOOP-9540) | Expose the InMemoryS3 and S3N FilesystemStores implementations for Unit testing. |  Minor | fs/s3, test | Hari |  |
+| [HADOOP-9540](https://issues.apache.org/jira/browse/HADOOP-9540) | Expose the InMemoryS3 and S3N FilesystemStores implementations for Unit testing. |  Minor | fs/s3, test | Hari | Hari |
 | [HADOOP-9457](https://issues.apache.org/jira/browse/HADOOP-9457) | add an SCM-ignored XML filename to keep secrets in (auth-keys.xml?) |  Minor | build | Steve Loughran |  |
 | [HADOOP-8896](https://issues.apache.org/jira/browse/HADOOP-8896) | Javadoc points to Wrong Reader and Writer classes in SequenceFile |  Trivial | documentation, io | Timothy Mann | Ray Chiang |
 | [HADOOP-8815](https://issues.apache.org/jira/browse/HADOOP-8815) | RandomDatum overrides equals(Object) but no hashCode() |  Minor | test | Brandon Li | Brandon Li |
@@ -234,7 +239,7 @@
 | [YARN-1954](https://issues.apache.org/jira/browse/YARN-1954) | Add waitFor to AMRMClient(Async) |  Major | client | Zhijie Shen | Tsuyoshi Ozawa |
 | [YARN-1918](https://issues.apache.org/jira/browse/YARN-1918) | Typo in description and error message for 'yarn.resourcemanager.cluster-id' |  Trivial | . | Devaraj K | Anandha L Ranganathan |
 | [YARN-1769](https://issues.apache.org/jira/browse/YARN-1769) | CapacityScheduler:  Improve reservations |  Major | capacityscheduler | Thomas Graves | Thomas Graves |
-| [YARN-666](https://issues.apache.org/jira/browse/YARN-666) | [Umbrella] Support rolling upgrades in YARN |  Major | . | Siddharth Seth |  |
+| [YARN-666](https://issues.apache.org/jira/browse/YARN-666) | [Umbrella] Support rolling upgrades in YARN |  Major | graceful, rolling upgrade | Siddharth Seth |  |
 
 
 ### BUG FIXES:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.0/RELEASENOTES.2.6.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.0/RELEASENOTES.2.6.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.0/RELEASENOTES.2.6.0.md
index 756938e..064d74d 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.0/RELEASENOTES.2.6.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.6.0/RELEASENOTES.2.6.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  2.6.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---
@@ -44,13 +44,6 @@ Remove unnecessary synchronized blocks from Snappy/Zlib codecs.
 
 ---
 
-* [HADOOP-10620](https://issues.apache.org/jira/browse/HADOOP-10620) | *Major* | **/docs/current doesn't point to the latest version 2.4.0**
-
-Verified http://hadoop.apache.org/docs/current/ link now points to current release (v2.6.0).
-
-
----
-
 * [HADOOP-10583](https://issues.apache.org/jira/browse/HADOOP-10583) | *Minor* | **bin/hadoop key throws NPE with no args and assorted other fixups**
 
 bin/hadoop key
@@ -66,13 +59,6 @@ Fix of inappropriate test of delete functionality.
 
 ---
 
-* [HADOOP-10201](https://issues.apache.org/jira/browse/HADOOP-10201) | *Major* | **Add Listing Support to Key Management APIs**
-
-I just committed this. Thanks, Larry!
-
-
----
-
 * [HADOOP-8944](https://issues.apache.org/jira/browse/HADOOP-8944) | *Trivial* | **Shell command fs -count should include human readable option**
 
 Implements -h option for fs -count to show file sizes in human readable format. Additionally, ContentSummary.getHeader() now returns a different string that is incompatible with previous releases.
@@ -106,20 +92,6 @@ The time period in milliseconds that the allocation count for each array length
 
 ---
 
-* [HDFS-7091](https://issues.apache.org/jira/browse/HDFS-7091) | *Minor* | **Add forwarding constructor for INodeFile for existing callers**
-
-Thanks Nicholas! Revised title and committed to the feature branch.
-
-
----
-
-* [HDFS-7046](https://issues.apache.org/jira/browse/HDFS-7046) | *Critical* | **HA NN can NPE upon transition to active**
-
-Thanks for the reviews, gentlemen. It's been committed to trunk and branch-2.
-
-
----
-
 * [HDFS-6606](https://issues.apache.org/jira/browse/HDFS-6606) | *Major* | **Optimize HDFS Encrypted Transport performance**
 
 HDFS now supports the option to configure AES encryption for block data transfer.  AES offers improved cryptographic strength and performance over the prior options of 3DES and RC4.
@@ -155,13 +127,6 @@ The libhdfs C API is now supported on Windows.
 
 ---
 
-* [YARN-2830](https://issues.apache.org/jira/browse/YARN-2830) | *Blocker* | **Add backwords compatible ContainerId.newInstance constructor for use within Tez Local Mode**
-
-I just committed this. Thanks [~jeagles] for the patch and [~ozawa] for the reviews!
-
-
----
-
 * [YARN-2615](https://issues.apache.org/jira/browse/YARN-2615) | *Blocker* | **ClientToAMTokenIdentifier and DelegationTokenIdentifier should allow extended fields**
 
 **WARNING: No release note provided for this incompatible change.**


[15/34] hadoop git commit: HADOOP-12789. log classpath of ApplicationClassLoader at INFO level. (Sangjin Lee via mingma)

Posted by ar...@apache.org.
HADOOP-12789. log classpath of ApplicationClassLoader at INFO level. (Sangjin Lee via mingma)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/49eedc7f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/49eedc7f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/49eedc7f

Branch: refs/heads/HDFS-1312
Commit: 49eedc7ff02ea61764f416f0e2ddf81370aec5fb
Parents: 352d299
Author: Ming Ma <mi...@apache.org>
Authored: Mon Mar 7 20:26:19 2016 -0800
Committer: Ming Ma <mi...@apache.org>
Committed: Mon Mar 7 20:26:19 2016 -0800

----------------------------------------------------------------------
 .../java/org/apache/hadoop/util/ApplicationClassLoader.java     | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/49eedc7f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
index 6d37c28..8c1601a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
@@ -94,10 +94,6 @@ public class ApplicationClassLoader extends URLClassLoader {
   public ApplicationClassLoader(URL[] urls, ClassLoader parent,
       List<String> systemClasses) {
     super(urls, parent);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("urls: " + Arrays.toString(urls));
-      LOG.debug("system classes: " + systemClasses);
-    }
     this.parent = parent;
     if (parent == null) {
       throw new IllegalArgumentException("No parent classloader!");
@@ -106,6 +102,7 @@ public class ApplicationClassLoader extends URLClassLoader {
     this.systemClasses = (systemClasses == null || systemClasses.isEmpty()) ?
         Arrays.asList(StringUtils.getTrimmedStrings(SYSTEM_CLASSES_DEFAULT)) :
         systemClasses;
+    LOG.info("classpath: " + Arrays.toString(urls));
     LOG.info("system classes: " + this.systemClasses);
   }
 


[27/34] hadoop git commit: HADOOP-12798. Update changelog and release notes (2016-03-04) (aw)

Posted by ar...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.19.2/RELEASENOTES.0.19.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.19.2/RELEASENOTES.0.19.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.19.2/RELEASENOTES.0.19.2.md
index 5cf2ebf..862304e 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.19.2/RELEASENOTES.0.19.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.19.2/RELEASENOTES.0.19.2.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.19.2 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.2.0/CHANGES.0.2.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.2.0/CHANGES.0.2.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.2.0/CHANGES.0.2.0.md
index 9769965..72e7d42 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.2.0/CHANGES.0.2.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.2.0/CHANGES.0.2.0.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.2.0/RELEASENOTES.0.2.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.2.0/RELEASENOTES.0.2.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.2.0/RELEASENOTES.0.2.0.md
index 75182d1..a61abcf 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.2.0/RELEASENOTES.0.2.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.2.0/RELEASENOTES.0.2.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.2.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.2.1/CHANGES.0.2.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.2.1/CHANGES.0.2.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.2.1/CHANGES.0.2.1.md
index 19e53c2..f236e84 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.2.1/CHANGES.0.2.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.2.1/CHANGES.0.2.1.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.2.1/RELEASENOTES.0.2.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.2.1/RELEASENOTES.0.2.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.2.1/RELEASENOTES.0.2.1.md
index 0e5f033..903c992 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.2.1/RELEASENOTES.0.2.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.2.1/RELEASENOTES.0.2.1.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.2.1 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.0/CHANGES.0.20.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.0/CHANGES.0.20.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.0/CHANGES.0.20.0.md
index 2e03f9e..4c1dd51 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.0/CHANGES.0.20.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.0/CHANGES.0.20.0.md
@@ -52,6 +52,12 @@
 | [HADOOP-1650](https://issues.apache.org/jira/browse/HADOOP-1650) | Upgrade Jetty to 6.x |  Major | . | Devaraj Das | Chris Douglas |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.0/RELEASENOTES.0.20.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.0/RELEASENOTES.0.20.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.0/RELEASENOTES.0.20.0.md
index 339d7ab..4e13959 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.0/RELEASENOTES.0.20.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.0/RELEASENOTES.0.20.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.20.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---
@@ -306,7 +306,7 @@ Removed deprecated method parseArgs from org.apache.hadoop.fs.FileSystem.
 
 * [HADOOP-3497](https://issues.apache.org/jira/browse/HADOOP-3497) | *Major* | **File globbing with a PathFilter is too restrictive**
 
-Changed the semantics of file globbing with a PathFilter (using the globStatus method of FileSystem). Previously, the filtering was too restrictive, so that a glob of /*/* and a filter that only accepts /a/b would not have matched /a/b. With this change /a/b does match.
+Changed the semantics of file globbing with a PathFilter (using the globStatus method of FileSystem). Previously, the filtering was too restrictive, so that a glob of /\*/\* and a filter that only accepts /a/b would not have matched /a/b. With this change /a/b does match.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.1/CHANGES.0.20.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.1/CHANGES.0.20.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.1/CHANGES.0.20.1.md
index 59af3f2..45ca0d7 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.1/CHANGES.0.20.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.1/CHANGES.0.20.1.md
@@ -28,6 +28,12 @@
 | [HADOOP-5726](https://issues.apache.org/jira/browse/HADOOP-5726) | Remove pre-emption from the capacity scheduler code base |  Major | . | Hemanth Yamijala | rahul k singh |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.1/RELEASENOTES.0.20.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.1/RELEASENOTES.0.20.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.1/RELEASENOTES.0.20.1.md
index afe74f1..953c100 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.1/RELEASENOTES.0.20.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.1/RELEASENOTES.0.20.1.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.20.1 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---
@@ -46,7 +46,7 @@ Post HADOOP-4372, empty job history files caused NPE. This issues fixes that by
 
 * [HADOOP-5921](https://issues.apache.org/jira/browse/HADOOP-5921) | *Major* | **JobTracker does not come up because of NotReplicatedYetException**
 
-Jobtracker crashes if it fails to create jobtracker.info file (i.e if sufficient datanodes are not up). With this patch it keeps on retrying on IOExceptions assuming IOExceptions in jobtracker.info creation implies that the hdfs is not in *ready *state.
+Jobtracker crashes if it fails to create jobtracker.info file (i.e if sufficient datanodes are not up). With this patch it keeps on retrying on IOExceptions assuming IOExceptions in jobtracker.info creation implies that the hdfs is not in \*ready \*state.
 
 
 ---
@@ -142,7 +142,7 @@ Job initialization process was changed to not change (run) states during initial
 - this can lead to deadlock as state changes require circular locking (i.e JobInProgress requires JobTracker lock)
 - events were not raised as these state changes were not informed/propogated back to the JobTracker
 
-Now the JobTracker takes care of initializing/failing/killing the job and raising appropriate events. The simple rule that was enforced was that "The JobTracker lock is *must* before changing the run-state of a job".
+Now the JobTracker takes care of initializing/failing/killing the job and raising appropriate events. The simple rule that was enforced was that "The JobTracker lock is \*must\* before changing the run-state of a job".
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.2/CHANGES.0.20.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.2/CHANGES.0.20.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.2/CHANGES.0.20.2.md
index fb21db9..3ca5bdb 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.2/CHANGES.0.20.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.2/CHANGES.0.20.2.md
@@ -27,6 +27,12 @@
 | [HDFS-793](https://issues.apache.org/jira/browse/HDFS-793) | DataNode should first receive the whole packet ack message before it constructs and sends its own ack message for the packet |  Blocker | datanode | Hairong Kuang | Hairong Kuang |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.2/RELEASENOTES.0.20.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.2/RELEASENOTES.0.20.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.2/RELEASENOTES.0.20.2.md
index f363e0a..2ebfdc0 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.2/RELEASENOTES.0.20.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.2/RELEASENOTES.0.20.2.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.20.2 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.203.0/CHANGES.0.20.203.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.203.0/CHANGES.0.20.203.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.203.0/CHANGES.0.20.203.0.md
index d951eef..b45f75a 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.203.0/CHANGES.0.20.203.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.203.0/CHANGES.0.20.203.0.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.203.0/RELEASENOTES.0.20.203.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.203.0/RELEASENOTES.0.20.203.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.203.0/RELEASENOTES.0.20.203.0.md
index 1c1dd4c..4138475 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.203.0/RELEASENOTES.0.20.203.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.203.0/RELEASENOTES.0.20.203.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.20.203.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---
@@ -73,10 +73,10 @@ Configuration changes:
 
 This patch does four things:
 
-    * it changes the directory structure of the done directory that holds history logs for jobs that are completed,
-    * it builds toy databases for completed jobs, so we no longer have to scan 2N files on DFS to find out facts about the N jobs that have completed since the job tracker started [which can be hundreds of thousands of files in practical cases],
-    * it changes the job history browser to display more information and allow more filtering criteria, and
-    * it creates a new programmatic interface for finding files matching user-chosen criteria. This allows users to no longer be concerned with our methods of storing them, in turn allowing us to change those at will.
+\* it changes the directory structure of the done directory that holds history logs for jobs that are completed,
+\* it builds toy databases for completed jobs, so we no longer have to scan 2N files on DFS to find out facts about the N jobs that have completed since the job tracker started [which can be hundreds of thousands of files in practical cases],
+\* it changes the job history browser to display more information and allow more filtering criteria, and
+\* it creates a new programmatic interface for finding files matching user-chosen criteria. This allows users to no longer be concerned with our methods of storing them, in turn allowing us to change those at will.
 
 The new API described above, which can be used to programmatically obtain history file PATHs given search criteria, is described below:
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.203.1/CHANGES.0.20.203.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.203.1/CHANGES.0.20.203.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.203.1/CHANGES.0.20.203.1.md
index def258c..a288a36 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.203.1/CHANGES.0.20.203.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.203.1/CHANGES.0.20.203.1.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop Changelog
 
-## Release 0.20.203.1 - Unreleased
+## Release 0.20.203.1 - Unreleased (as of 2016-03-04)
 
 ### INCOMPATIBLE CHANGES:
 
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.203.1/RELEASENOTES.0.20.203.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.203.1/RELEASENOTES.0.20.203.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.203.1/RELEASENOTES.0.20.203.1.md
index 5827567..97f4784 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.203.1/RELEASENOTES.0.20.203.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.203.1/RELEASENOTES.0.20.203.1.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.20.203.1 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.204.0/CHANGES.0.20.204.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.204.0/CHANGES.0.20.204.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.204.0/CHANGES.0.20.204.0.md
index b7eb2b9..a9d18ef 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.204.0/CHANGES.0.20.204.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.204.0/CHANGES.0.20.204.0.md
@@ -27,6 +27,12 @@
 | [HADOOP-6255](https://issues.apache.org/jira/browse/HADOOP-6255) | Create an rpm integration project |  Major | . | Owen O'Malley | Eric Yang |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.204.0/RELEASENOTES.0.20.204.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.204.0/RELEASENOTES.0.20.204.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.204.0/RELEASENOTES.0.20.204.0.md
index 7a5f560..ae774bd 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.204.0/RELEASENOTES.0.20.204.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.204.0/RELEASENOTES.0.20.204.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.20.204.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.205.0/CHANGES.0.20.205.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.205.0/CHANGES.0.20.205.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.205.0/CHANGES.0.20.205.0.md
index f032539..1e26cb4 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.205.0/CHANGES.0.20.205.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.205.0/CHANGES.0.20.205.0.md
@@ -29,6 +29,12 @@
 | [HDFS-630](https://issues.apache.org/jira/browse/HDFS-630) | In DFSOutputStream.nextBlockOutputStream(), the client can exclude specific datanodes when locating the next block. |  Major | hdfs-client, namenode | Ruyue Ma | Cosmin Lehene |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.205.0/RELEASENOTES.0.20.205.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.205.0/RELEASENOTES.0.20.205.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.205.0/RELEASENOTES.0.20.205.0.md
index 269401a..c70b653 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.205.0/RELEASENOTES.0.20.205.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.205.0/RELEASENOTES.0.20.205.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.20.205.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.3/CHANGES.0.20.3.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.3/CHANGES.0.20.3.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.3/CHANGES.0.20.3.md
index 1bac3d5..3a5e749 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.3/CHANGES.0.20.3.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.3/CHANGES.0.20.3.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop Changelog
 
-## Release 0.20.3 - Unreleased
+## Release 0.20.3 - Unreleased (as of 2016-03-04)
 
 ### INCOMPATIBLE CHANGES:
 
@@ -29,6 +29,12 @@
 | [HDFS-132](https://issues.apache.org/jira/browse/HDFS-132) | Namenode in Safemode reports to Simon non-zero number of deleted files during startup |  Minor | namenode | Hairong Kuang | Suresh Srinivas |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.3/RELEASENOTES.0.20.3.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.3/RELEASENOTES.0.20.3.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.3/RELEASENOTES.0.20.3.md
index 5e71d30..4f48cfc 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.3/RELEASENOTES.0.20.3.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.20.3/RELEASENOTES.0.20.3.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.20.3 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.21.0/CHANGES.0.21.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.21.0/CHANGES.0.21.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.21.0/CHANGES.0.21.0.md
index da203dc..75c62a1 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.21.0/CHANGES.0.21.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.21.0/CHANGES.0.21.0.md
@@ -107,6 +107,12 @@
 | [MAPREDUCE-157](https://issues.apache.org/jira/browse/MAPREDUCE-157) | Job History log file format is not friendly for external tools. |  Major | . | Owen O'Malley | Jothi Padmanabhan |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.21.0/RELEASENOTES.0.21.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.21.0/RELEASENOTES.0.21.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.21.0/RELEASENOTES.0.21.0.md
index 97016b2..9f341c1 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.21.0/RELEASENOTES.0.21.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.21.0/RELEASENOTES.0.21.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.21.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---
@@ -503,7 +503,7 @@ Jars passed to the -libjars option of hadoop jars are no longer unpacked inside
 
 * [HADOOP-5144](https://issues.apache.org/jira/browse/HADOOP-5144) | *Major* | **manual way of turning on restore of failed storage replicas for namenode**
 
-New DFSAdmin command -restoreFailedStorage true|false|check sets policy for restoring failed fsimage/editslog volumes.
+New DFSAdmin command -restoreFailedStorage true\|false\|check sets policy for restoring failed fsimage/editslog volumes.
 
 
 ---
@@ -1624,7 +1624,7 @@ Fixed TaskTracker and related classes so as to set correct and most restrictive
 $mapred.local.dir
    `-- taskTracker
         `-- $user
-               |- distcache
+               \|- distcache
                `-- jobcache
  - Distributed cache files/archives are now user-owned by the job-owner and the group-owned by the special group-owner of the task-controller binary. The files/archives are set most private permissions possible, and as soon as possible, immediately after the files/dirs are first localized on the TT.
  - As depicted by the new directory structure, a directory corresponding to each user is created on each TT when that particular user's first task are assigned to the corresponding TT. These user directories remain on the TT forever are not cleaned when unused, which is targeted to be fixed via MAPREDUCE-1019.
@@ -1951,7 +1951,7 @@ Added a new target 'test-commit' to the build.xml file which runs tests specifie
 
 * [MAPREDUCE-656](https://issues.apache.org/jira/browse/MAPREDUCE-656) | *Major* | **Change org.apache.hadoop.mapred.SequenceFile\* classes to use new api**
 
-Ports the SequenceFile* classes to the new Map/Reduce API
+Ports the SequenceFile\* classes to the new Map/Reduce API
 
 
 ---
@@ -2224,7 +2224,7 @@ Only one MR cluster is brought up and hence there is no scope of jobid clashing.
 
 * [MAPREDUCE-144](https://issues.apache.org/jira/browse/MAPREDUCE-144) | *Major* | **TaskMemoryManager should log process-tree's status while killing tasks.**
 
-Modified TaskMemoryManager so that it logs a map/reduce task's process-tree's status just before it is killed when it grows out of its configured memory limits. The log dump is in the format " |- PID PPID PGRPID SESSID CMD\_NAME VMEM\_USAGE(BYTES) FULL\_CMD\_LINE".
+Modified TaskMemoryManager so that it logs a map/reduce task's process-tree's status just before it is killed when it grows out of its configured memory limits. The log dump is in the format " \|- PID PPID PGRPID SESSID CMD\_NAME VMEM\_USAGE(BYTES) FULL\_CMD\_LINE".
 
 This is useful for debugging the cause for a map/reduce task and it's corresponding process-tree to be killed by the TaskMemoryManager.
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.21.1/CHANGES.0.21.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.21.1/CHANGES.0.21.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.21.1/CHANGES.0.21.1.md
index 7acc4fc..c5e4468 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.21.1/CHANGES.0.21.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.21.1/CHANGES.0.21.1.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop Changelog
 
-## Release 0.21.1 - Unreleased
+## Release 0.21.1 - Unreleased (as of 2016-03-04)
 
 ### INCOMPATIBLE CHANGES:
 
@@ -27,6 +27,12 @@
 | [MAPREDUCE-1905](https://issues.apache.org/jira/browse/MAPREDUCE-1905) | Context.setStatus() and progress() api are ignored |  Blocker | task | Amareshwari Sriramadasu | Amareshwari Sriramadasu |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.21.1/RELEASENOTES.0.21.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.21.1/RELEASENOTES.0.21.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.21.1/RELEASENOTES.0.21.1.md
index 1d88d20..0e2a4e8 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.21.1/RELEASENOTES.0.21.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.21.1/RELEASENOTES.0.21.1.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.21.1 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---
@@ -32,7 +32,7 @@ Updated the help for the touchz command.
 
 * [HADOOP-7117](https://issues.apache.org/jira/browse/HADOOP-7117) | *Major* | **Move secondary namenode checkpoint configs from core-default.xml to hdfs-default.xml**
 
-Removed references to the older fs.checkpoint.* properties that resided in core-site.xml
+Removed references to the older fs.checkpoint.\* properties that resided in core-site.xml
 
 
 ---
@@ -53,7 +53,7 @@ I have just committed this to 0.21 and trunk. Thanks Vinay.
 
 * [HDFS-1596](https://issues.apache.org/jira/browse/HDFS-1596) | *Major* | **Move secondary namenode checkpoint configs from core-default.xml to hdfs-default.xml**
 
-Removed references to the older fs.checkpoint.* properties that resided in core-site.xml
+Removed references to the older fs.checkpoint.\* properties that resided in core-site.xml
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.22.0/CHANGES.0.22.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.22.0/CHANGES.0.22.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.22.0/CHANGES.0.22.0.md
index 2869b09..40de51c 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.22.0/CHANGES.0.22.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.22.0/CHANGES.0.22.0.md
@@ -49,6 +49,12 @@
 | [MAPREDUCE-1664](https://issues.apache.org/jira/browse/MAPREDUCE-1664) | Job Acls affect Queue Acls |  Major | security | Ravi Gummadi | Ravi Gummadi |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.22.0/RELEASENOTES.0.22.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.22.0/RELEASENOTES.0.22.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.22.0/RELEASENOTES.0.22.0.md
index 30368cc..41ffd77 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.22.0/RELEASENOTES.0.22.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.22.0/RELEASENOTES.0.22.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.22.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---
@@ -74,7 +74,7 @@ I have just committed this to trunk and branch-0.22. Thanks Roman!
 
 * [HADOOP-7117](https://issues.apache.org/jira/browse/HADOOP-7117) | *Major* | **Move secondary namenode checkpoint configs from core-default.xml to hdfs-default.xml**
 
-Removed references to the older fs.checkpoint.* properties that resided in core-site.xml
+Removed references to the older fs.checkpoint.\* properties that resided in core-site.xml
 
 
 ---
@@ -208,7 +208,7 @@ Removed thriftfs contrib component.
 
 * [HDFS-1596](https://issues.apache.org/jira/browse/HDFS-1596) | *Major* | **Move secondary namenode checkpoint configs from core-default.xml to hdfs-default.xml**
 
-Removed references to the older fs.checkpoint.* properties that resided in core-site.xml
+Removed references to the older fs.checkpoint.\* properties that resided in core-site.xml
 
 
 ---
@@ -510,6 +510,7 @@ Added a metric to track number of heartbeats processed by the JobTracker.
 
 * [MAPREDUCE-1664](https://issues.apache.org/jira/browse/MAPREDUCE-1664) | *Major* | **Job Acls affect Queue Acls**
 
+<!-- markdown -->
 * Removed aclsEnabled flag from queues configuration files.
 * Removed the configuration property mapreduce.cluster.job-authorization-enabled.
 * Added mapreduce.cluster.acls.enabled as the single configuration property in mapred-default.xml that enables the authorization checks for all job level and queue level operations.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.22.1/CHANGES.0.22.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.22.1/CHANGES.0.22.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.22.1/CHANGES.0.22.1.md
index f77dab8..ac195f0 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.22.1/CHANGES.0.22.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.22.1/CHANGES.0.22.1.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop Changelog
 
-## Release 0.22.1 - Unreleased
+## Release 0.22.1 - Unreleased (as of 2016-03-04)
 
 ### INCOMPATIBLE CHANGES:
 
@@ -27,6 +27,12 @@
 | [HADOOP-6453](https://issues.apache.org/jira/browse/HADOOP-6453) | Hadoop wrapper script shouldn't ignore an existing JAVA\_LIBRARY\_PATH |  Minor | scripts | Chad Metcalf |  |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.22.1/RELEASENOTES.0.22.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.22.1/RELEASENOTES.0.22.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.22.1/RELEASENOTES.0.22.1.md
index 32b973d..f08f501 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.22.1/RELEASENOTES.0.22.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.22.1/RELEASENOTES.0.22.1.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.22.1 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---
@@ -32,7 +32,7 @@ Adding support for Kerberos HTTP SPNEGO authentication to the Hadoop web-console
 
 * [HADOOP-6995](https://issues.apache.org/jira/browse/HADOOP-6995) | *Minor* | **Allow wildcards to be used in ProxyUsers configurations**
 
-When configuring proxy users and hosts, the special wildcard value "*" may be specified to match any host or any user.
+When configuring proxy users and hosts, the special wildcard value "\*" may be specified to match any host or any user.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.0/CHANGES.0.23.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.0/CHANGES.0.23.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.0/CHANGES.0.23.0.md
index e7a6506..cefa86d 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.0/CHANGES.0.23.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.0/CHANGES.0.23.0.md
@@ -69,6 +69,12 @@
 | [MAPREDUCE-1738](https://issues.apache.org/jira/browse/MAPREDUCE-1738) | MapReduce portion of HADOOP-6728 (ovehaul metrics framework) |  Major | . | Luke Lu | Luke Lu |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.0/RELEASENOTES.0.23.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.0/RELEASENOTES.0.23.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.0/RELEASENOTES.0.23.0.md
index ffe52ab..69e364f 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.0/RELEASENOTES.0.23.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.0/RELEASENOTES.0.23.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.23.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---
@@ -238,7 +238,7 @@ Adding support for Kerberos HTTP SPNEGO authentication to the Hadoop web-console
 
 * [HADOOP-7117](https://issues.apache.org/jira/browse/HADOOP-7117) | *Major* | **Move secondary namenode checkpoint configs from core-default.xml to hdfs-default.xml**
 
-Removed references to the older fs.checkpoint.* properties that resided in core-site.xml
+Removed references to the older fs.checkpoint.\* properties that resided in core-site.xml
 
 
 ---
@@ -464,7 +464,7 @@ Removed the deprecated fields in DataTransferProtocol.
 
 * [HDFS-1939](https://issues.apache.org/jira/browse/HDFS-1939) | *Major* | **ivy: test conf should not extend common conf**
 
-* Removed duplicated jars in test class path.
+\* Removed duplicated jars in test class path.
 
 
 ---
@@ -562,7 +562,7 @@ Added two configuration properties, dfs.client.block.write.replace-datanode-on-f
 
 * [HDFS-1596](https://issues.apache.org/jira/browse/HDFS-1596) | *Major* | **Move secondary namenode checkpoint configs from core-default.xml to hdfs-default.xml**
 
-Removed references to the older fs.checkpoint.* properties that resided in core-site.xml
+Removed references to the older fs.checkpoint.\* properties that resided in core-site.xml
 
 
 ---
@@ -1064,9 +1064,9 @@ Adds cpu, physical memory, virtual memory and heap usages to TraceBuilder's outp
 * [MAPREDUCE-2037](https://issues.apache.org/jira/browse/MAPREDUCE-2037) | *Major* | **Capturing interim progress times, CPU usage, and memory usage, when tasks reach certain progress thresholds**
 
 Capture intermediate task resource consumption information:
-* Time taken so far
-* CPU load [either at the time the data are taken, or exponentially smoothed]
-* Memory load [also either at the time the data are taken, or exponentially smoothed]
+\* Time taken so far
+\* CPU load [either at the time the data are taken, or exponentially smoothed]
+\* Memory load [also either at the time the data are taken, or exponentially smoothed]
 
 This would be taken at intervals that depend on the task progress plateaus. For example, reducers have three progress ranges - [0-1/3], (1/3-2/3], and (2/3-3/3] - where fundamentally different activities happen. Mappers have different boundaries that are not symmetrically placed [0-9/10], (9/10-1]. Data capture boundaries should coincide with activity boundaries. For the state information capture [CPU and memory] we should average over the covered interval.
 
@@ -1157,8 +1157,8 @@ MapReduce has undergone a complete re-haul in hadoop-0.23 and we now have, what
 The fundamental idea of MRv2 is to split up the two major functionalities of the JobTracker, resource management and job scheduling/monitoring, into separate daemons. The idea is to have a global ResourceManager (RM) and per-application ApplicationMaster (AM).  An application is either a single job in the classical sense of Map-Reduce jobs or a DAG of jobs. The ResourceManager and per-node slave, the NodeManager (NM), form the data-computation framework. The ResourceManager is the ultimate authority that arbitrates resources among all the applications in the system. The per-application ApplicationMaster is, in effect, a framework specific library and is tasked with negotiating resources from the ResourceManager and working with the NodeManager(s) to execute and monitor the tasks.
 
 The ResourceManager has two main components:
-* Scheduler (S)
-* ApplicationsManager (ASM)
+\* Scheduler (S)
+\* ApplicationsManager (ASM)
 
 The Scheduler is responsible for allocating resources to the various running applications subject to familiar constraints of capacities, queues etc. The Scheduler is pure scheduler in the sense that it performs no monitoring or tracking of status for the application. Also, it offers no guarantees on restarting failed tasks either due to application failure or hardware failures. The Scheduler performs its scheduling function based the resource requirements of the applications; it does so based on the abstract notion of a Resource Container which incorporates elements such as memory, cpu, disk, network etc. 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.1/CHANGES.0.23.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.1/CHANGES.0.23.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.1/CHANGES.0.23.1.md
index 1d69f4e..dd31769 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.1/CHANGES.0.23.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.1/CHANGES.0.23.1.md
@@ -30,6 +30,12 @@
 | [MAPREDUCE-3720](https://issues.apache.org/jira/browse/MAPREDUCE-3720) | Command line listJobs should not visit each AM |  Major | client, mrv2 | Vinod Kumar Vavilapalli | Vinod Kumar Vavilapalli |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.1/RELEASENOTES.0.23.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.1/RELEASENOTES.0.23.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.1/RELEASENOTES.0.23.1.md
index 08c9df0..569e1ed 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.1/RELEASENOTES.0.23.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.1/RELEASENOTES.0.23.1.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.23.1 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---
@@ -46,7 +46,7 @@ Adding config for MapReduce History Server protocol in hadoop-policy.xml for ser
 
 * [HADOOP-7963](https://issues.apache.org/jira/browse/HADOOP-7963) | *Blocker* | **test failures: TestViewFileSystemWithAuthorityLocalFileSystem and TestViewFileSystemLocalFileSystem**
 
-Fix ViewFS to catch a null canonical service-name and pass tests TestViewFileSystem*
+Fix ViewFS to catch a null canonical service-name and pass tests TestViewFileSystem\*
 
 
 ---
@@ -308,7 +308,7 @@ Fixed TaskHeartbeatHandler to not hold a global lock for all task-updates.
 
 * [MAPREDUCE-3597](https://issues.apache.org/jira/browse/MAPREDUCE-3597) | *Major* | **Provide a way to access other info of history file from Rumentool**
 
-Rumen now provides {{Parsed*}} objects. These objects provide extra information that are not provided by {{Logged*}} objects.
+Rumen now provides {{Parsed\*}} objects. These objects provide extra information that are not provided by {{Logged\*}} objects.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.10/CHANGES.0.23.10.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.10/CHANGES.0.23.10.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.10/CHANGES.0.23.10.md
index 82bef03..75b818f 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.10/CHANGES.0.23.10.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.10/CHANGES.0.23.10.md
@@ -27,6 +27,12 @@
 | [YARN-707](https://issues.apache.org/jira/browse/YARN-707) | Add user info in the YARN ClientToken |  Blocker | . | Bikas Saha | Jason Lowe |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.10/RELEASENOTES.0.23.10.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.10/RELEASENOTES.0.23.10.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.10/RELEASENOTES.0.23.10.md
index f1388d0..be5dc41 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.10/RELEASENOTES.0.23.10.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.10/RELEASENOTES.0.23.10.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.23.10 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.11/CHANGES.0.23.11.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.11/CHANGES.0.23.11.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.11/CHANGES.0.23.11.md
index 3259d86..8313a69 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.11/CHANGES.0.23.11.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.11/CHANGES.0.23.11.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.11/RELEASENOTES.0.23.11.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.11/RELEASENOTES.0.23.11.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.11/RELEASENOTES.0.23.11.md
index 7fb4718..a44802e 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.11/RELEASENOTES.0.23.11.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.11/RELEASENOTES.0.23.11.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.23.11 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.2/CHANGES.0.23.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.2/CHANGES.0.23.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.2/CHANGES.0.23.2.md
index cd1c26a..37b85a5 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.2/CHANGES.0.23.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.2/CHANGES.0.23.2.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop Changelog
 
-## Release 0.23.2 - Unreleased
+## Release 0.23.2 - Unreleased (as of 2016-03-04)
 
 ### INCOMPATIBLE CHANGES:
 
@@ -29,6 +29,12 @@
 | [HDFS-2887](https://issues.apache.org/jira/browse/HDFS-2887) | Define a FSVolume interface |  Major | datanode | Tsz Wo Nicholas Sze | Tsz Wo Nicholas Sze |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.2/RELEASENOTES.0.23.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.2/RELEASENOTES.0.23.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.2/RELEASENOTES.0.23.2.md
index 6591565..ed372ba 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.2/RELEASENOTES.0.23.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.2/RELEASENOTES.0.23.2.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.23.2 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---
@@ -32,7 +32,7 @@ FsShell mkdir now accepts a -p flag.  Like unix, mkdir -p will not fail if the d
 
 * [HADOOP-8164](https://issues.apache.org/jira/browse/HADOOP-8164) | *Major* | **Handle paths using back slash as path separator for windows only**
 
-This jira only allows providing paths using back slash as separator on Windows. The back slash on *nix system will be used as escape character. The support for paths using back slash as path separator will be removed in HADOOP-8139 in release 23.3.
+This jira only allows providing paths using back slash as separator on Windows. The back slash on \*nix system will be used as escape character. The support for paths using back slash as path separator will be removed in HADOOP-8139 in release 23.3.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.3/CHANGES.0.23.3.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.3/CHANGES.0.23.3.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.3/CHANGES.0.23.3.md
index e997154..9b50eaf 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.3/CHANGES.0.23.3.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.3/CHANGES.0.23.3.md
@@ -31,6 +31,12 @@
 | [MAPREDUCE-3812](https://issues.apache.org/jira/browse/MAPREDUCE-3812) | Lower default allocation sizes, fix allocation configurations and document them |  Major | mrv2, performance | Vinod Kumar Vavilapalli | Harsh J |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.3/RELEASENOTES.0.23.3.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.3/RELEASENOTES.0.23.3.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.3/RELEASENOTES.0.23.3.md
index b003778..48e4473 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.3/RELEASENOTES.0.23.3.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.3/RELEASENOTES.0.23.3.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.23.3 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.4/CHANGES.0.23.4.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.4/CHANGES.0.23.4.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.4/CHANGES.0.23.4.md
index eeba2cb..b9b7eee 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.4/CHANGES.0.23.4.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.4/CHANGES.0.23.4.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.4/RELEASENOTES.0.23.4.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.4/RELEASENOTES.0.23.4.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.4/RELEASENOTES.0.23.4.md
index ec7073d..29453f4 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.4/RELEASENOTES.0.23.4.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.4/RELEASENOTES.0.23.4.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.23.4 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.5/CHANGES.0.23.5.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.5/CHANGES.0.23.5.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.5/CHANGES.0.23.5.md
index 56568ab..0e76124 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.5/CHANGES.0.23.5.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.5/CHANGES.0.23.5.md
@@ -27,6 +27,12 @@
 | [HDFS-4080](https://issues.apache.org/jira/browse/HDFS-4080) | Add a separate logger for block state change logs to enable turning off those logs |  Major | namenode | Kihwal Lee | Kihwal Lee |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.5/RELEASENOTES.0.23.5.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.5/RELEASENOTES.0.23.5.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.5/RELEASENOTES.0.23.5.md
index d8efa40..0d292e9 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.5/RELEASENOTES.0.23.5.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.5/RELEASENOTES.0.23.5.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.23.5 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.6/CHANGES.0.23.6.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.6/CHANGES.0.23.6.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.6/CHANGES.0.23.6.md
index 2a34ad7..6ac205f 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.6/CHANGES.0.23.6.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.6/CHANGES.0.23.6.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.6/RELEASENOTES.0.23.6.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.6/RELEASENOTES.0.23.6.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.6/RELEASENOTES.0.23.6.md
index 5038bad..34a8707 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.6/RELEASENOTES.0.23.6.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.6/RELEASENOTES.0.23.6.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.23.6 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.7/CHANGES.0.23.7.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.7/CHANGES.0.23.7.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.7/CHANGES.0.23.7.md
index ff0352f..d67a6b7 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.7/CHANGES.0.23.7.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.7/CHANGES.0.23.7.md
@@ -27,6 +27,12 @@
 | [HDFS-395](https://issues.apache.org/jira/browse/HDFS-395) | DFS Scalability: Incremental block reports |  Major | datanode, namenode | dhruba borthakur | Tomasz Nykiel |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.7/RELEASENOTES.0.23.7.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.7/RELEASENOTES.0.23.7.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.7/RELEASENOTES.0.23.7.md
index 17ff954..e367c5d 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.7/RELEASENOTES.0.23.7.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.7/RELEASENOTES.0.23.7.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.23.7 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.8/CHANGES.0.23.8.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.8/CHANGES.0.23.8.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.8/CHANGES.0.23.8.md
index 707f411..04e8036 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.8/CHANGES.0.23.8.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.8/CHANGES.0.23.8.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.8/RELEASENOTES.0.23.8.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.8/RELEASENOTES.0.23.8.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.8/RELEASENOTES.0.23.8.md
index b3f8e71..719d885 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.8/RELEASENOTES.0.23.8.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.8/RELEASENOTES.0.23.8.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.23.8 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.9/CHANGES.0.23.9.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.9/CHANGES.0.23.9.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.9/CHANGES.0.23.9.md
index 6d02d85..fed8832 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.9/CHANGES.0.23.9.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.9/CHANGES.0.23.9.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.9/RELEASENOTES.0.23.9.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.9/RELEASENOTES.0.23.9.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.9/RELEASENOTES.0.23.9.md
index 81a267f..f4ff14d 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.9/RELEASENOTES.0.23.9.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.9/RELEASENOTES.0.23.9.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.23.9 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.24.0/CHANGES.0.24.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.24.0/CHANGES.0.24.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.24.0/CHANGES.0.24.0.md
index 13f0958..919f4dd 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.24.0/CHANGES.0.24.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.24.0/CHANGES.0.24.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop Changelog
 
-## Release 0.24.0 - Unreleased
+## Release 0.24.0 - Unreleased (as of 2016-03-04)
 
 ### INCOMPATIBLE CHANGES:
 
@@ -28,6 +28,12 @@
 | [HADOOP-7507](https://issues.apache.org/jira/browse/HADOOP-7507) | jvm metrics all use the same namespace |  Major | metrics | Jeff Bean | Alejandro Abdelnur |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.24.0/RELEASENOTES.0.24.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.24.0/RELEASENOTES.0.24.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.24.0/RELEASENOTES.0.24.0.md
index 7aa2984..6c43809 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.24.0/RELEASENOTES.0.24.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.24.0/RELEASENOTES.0.24.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.24.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.3.0/CHANGES.0.3.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.3.0/CHANGES.0.3.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.3.0/CHANGES.0.3.0.md
index db6218f..a2da3ed 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.3.0/CHANGES.0.3.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.3.0/CHANGES.0.3.0.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.3.0/RELEASENOTES.0.3.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.3.0/RELEASENOTES.0.3.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.3.0/RELEASENOTES.0.3.0.md
index e648a8e..50a2995 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.3.0/RELEASENOTES.0.3.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.3.0/RELEASENOTES.0.3.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.3.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.3.1/CHANGES.0.3.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.3.1/CHANGES.0.3.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.3.1/CHANGES.0.3.1.md
index ba7aebf..9bf1d66 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.3.1/CHANGES.0.3.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.3.1/CHANGES.0.3.1.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.3.1/RELEASENOTES.0.3.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.3.1/RELEASENOTES.0.3.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.3.1/RELEASENOTES.0.3.1.md
index de4e1cd3..1df1d5e 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.3.1/RELEASENOTES.0.3.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.3.1/RELEASENOTES.0.3.1.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.3.1 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.3.2/CHANGES.0.3.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.3.2/CHANGES.0.3.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.3.2/CHANGES.0.3.2.md
index 6b67b0a..dd30d8c 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.3.2/CHANGES.0.3.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.3.2/CHANGES.0.3.2.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.3.2/RELEASENOTES.0.3.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.3.2/RELEASENOTES.0.3.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.3.2/RELEASENOTES.0.3.2.md
index de9320f..38d49eb 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.3.2/RELEASENOTES.0.3.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.3.2/RELEASENOTES.0.3.2.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.3.2 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.4.0/CHANGES.0.4.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.4.0/CHANGES.0.4.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.4.0/CHANGES.0.4.0.md
index c5a6a5d..ac6b87d 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.4.0/CHANGES.0.4.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.4.0/CHANGES.0.4.0.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.4.0/RELEASENOTES.0.4.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.4.0/RELEASENOTES.0.4.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.4.0/RELEASENOTES.0.4.0.md
index 36576dd..8907b39 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.4.0/RELEASENOTES.0.4.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.4.0/RELEASENOTES.0.4.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.4.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.5.0/CHANGES.0.5.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.5.0/CHANGES.0.5.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.5.0/CHANGES.0.5.0.md
index 1aeccf9..e7c8244 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.5.0/CHANGES.0.5.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.5.0/CHANGES.0.5.0.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.5.0/RELEASENOTES.0.5.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.5.0/RELEASENOTES.0.5.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.5.0/RELEASENOTES.0.5.0.md
index 3fe30f5..cc7674a 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.5.0/RELEASENOTES.0.5.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.5.0/RELEASENOTES.0.5.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.5.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.6.0/CHANGES.0.6.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.6.0/CHANGES.0.6.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.6.0/CHANGES.0.6.0.md
index 5cd6567..0e2b761 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.6.0/CHANGES.0.6.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.6.0/CHANGES.0.6.0.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.6.0/RELEASENOTES.0.6.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.6.0/RELEASENOTES.0.6.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.6.0/RELEASENOTES.0.6.0.md
index ca86d83..105599b 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.6.0/RELEASENOTES.0.6.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.6.0/RELEASENOTES.0.6.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.6.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.6.1/CHANGES.0.6.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.6.1/CHANGES.0.6.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.6.1/CHANGES.0.6.1.md
index 031aa7f..d044e63 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.6.1/CHANGES.0.6.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.6.1/CHANGES.0.6.1.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.6.1/RELEASENOTES.0.6.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.6.1/RELEASENOTES.0.6.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.6.1/RELEASENOTES.0.6.1.md
index 39da5b8..8c6d666 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.6.1/RELEASENOTES.0.6.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.6.1/RELEASENOTES.0.6.1.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.6.1 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.6.2/CHANGES.0.6.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.6.2/CHANGES.0.6.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.6.2/CHANGES.0.6.2.md
index 858c777..6303874 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.6.2/CHANGES.0.6.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.6.2/CHANGES.0.6.2.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.6.2/RELEASENOTES.0.6.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.6.2/RELEASENOTES.0.6.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.6.2/RELEASENOTES.0.6.2.md
index cd13ea2..0adfa37 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.6.2/RELEASENOTES.0.6.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.6.2/RELEASENOTES.0.6.2.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.6.2 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.7.0/CHANGES.0.7.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.7.0/CHANGES.0.7.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.7.0/CHANGES.0.7.0.md
index 95657c8..ec97937 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.7.0/CHANGES.0.7.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.7.0/CHANGES.0.7.0.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.7.0/RELEASENOTES.0.7.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.7.0/RELEASENOTES.0.7.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.7.0/RELEASENOTES.0.7.0.md
index 55530fe..e9d43c0 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.7.0/RELEASENOTES.0.7.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.7.0/RELEASENOTES.0.7.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.7.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.7.1/CHANGES.0.7.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.7.1/CHANGES.0.7.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.7.1/CHANGES.0.7.1.md
index 9fdf922..a0a134b 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.7.1/CHANGES.0.7.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.7.1/CHANGES.0.7.1.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.7.1/RELEASENOTES.0.7.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.7.1/RELEASENOTES.0.7.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.7.1/RELEASENOTES.0.7.1.md
index b4660b8..5e46d2e 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.7.1/RELEASENOTES.0.7.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.7.1/RELEASENOTES.0.7.1.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.7.1 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 


[21/34] hadoop git commit: HADOOP-12895. SSLFactory#createSSLSocketFactory exception message is wrong. Contributed by Wei-Chiu Chuang.

Posted by ar...@apache.org.
HADOOP-12895. SSLFactory#createSSLSocketFactory exception message is wrong. Contributed by Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a3cc6e25
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a3cc6e25
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a3cc6e25

Branch: refs/heads/HDFS-1312
Commit: a3cc6e2511e096ea9a54f500b59257866a1df66b
Parents: 3c33158
Author: Andrew Wang <wa...@apache.org>
Authored: Tue Mar 8 13:51:20 2016 -0800
Committer: Andrew Wang <wa...@apache.org>
Committed: Tue Mar 8 13:51:48 2016 -0800

----------------------------------------------------------------------
 .../java/org/apache/hadoop/security/ssl/SSLFactory.java     | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3cc6e25/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java
index 518de80..ea65848 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java
@@ -212,7 +212,8 @@ public class SSLFactory implements ConnectionConfigurator {
   public SSLServerSocketFactory createSSLServerSocketFactory()
     throws GeneralSecurityException, IOException {
     if (mode != Mode.SERVER) {
-      throw new IllegalStateException("Factory is in CLIENT mode");
+      throw new IllegalStateException(
+          "Factory is not in SERVER mode. Actual mode is " + mode.toString());
     }
     return context.getServerSocketFactory();
   }
@@ -229,7 +230,8 @@ public class SSLFactory implements ConnectionConfigurator {
   public SSLSocketFactory createSSLSocketFactory()
     throws GeneralSecurityException, IOException {
     if (mode != Mode.CLIENT) {
-      throw new IllegalStateException("Factory is in CLIENT mode");
+      throw new IllegalStateException(
+          "Factory is not in CLIENT mode. Actual mode is " + mode.toString());
     }
     return context.getSocketFactory();
   }
@@ -241,7 +243,8 @@ public class SSLFactory implements ConnectionConfigurator {
    */
   public HostnameVerifier getHostnameVerifier() {
     if (mode != Mode.CLIENT) {
-      throw new IllegalStateException("Factory is in CLIENT mode");
+      throw new IllegalStateException(
+          "Factory is not in CLIENT mode. Actual mode is " + mode.toString());
     }
     return hostnameVerifier;
   }


[16/34] hadoop git commit: HDFS-9882. Add heartbeatsTotal in Datanode metrics. (Contributed by Hua Liu)

Posted by ar...@apache.org.
HDFS-9882. Add heartbeatsTotal in Datanode metrics. (Contributed by Hua Liu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c2140d05
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c2140d05
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c2140d05

Branch: refs/heads/HDFS-1312
Commit: c2140d05efaf18b41caae8c61d9f6d668ab0e874
Parents: 49eedc7
Author: Arpit Agarwal <ar...@apache.org>
Authored: Mon Mar 7 21:10:24 2016 -0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Mon Mar 7 21:10:24 2016 -0800

----------------------------------------------------------------------
 .../hadoop-common/src/site/markdown/Metrics.md                  | 2 ++
 .../org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java  | 5 +++++
 .../hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java    | 5 +++++
 3 files changed, 12 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2140d05/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index b660b16..699316f 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -290,6 +290,8 @@ Each metrics record contains tags such as SessionId and Hostname as additional i
 | `ReplaceBlockOpAvgTime` | Average time of block replace operations in milliseconds |
 | `HeartbeatsNumOps` | Total number of heartbeats |
 | `HeartbeatsAvgTime` | Average heartbeat time in milliseconds |
+| `HeartbeatsTotalNumOps` | Total number of heartbeats which is a duplicate of HeartbeatsNumOps |
+| `HeartbeatsTotalAvgTime` | Average total heartbeat time in milliseconds |
 | `LifelinesNumOps` | Total number of lifeline messages |
 | `LifelinesAvgTime` | Average lifeline message processing time in milliseconds |
 | `BlockReportsNumOps` | Total number of block report operations |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2140d05/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 7184a49..49f64c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -607,6 +607,11 @@ class BPServiceActor implements Runnable {
           processCommand(new DatanodeCommand[]{ cmd });
         }
 
+        if (sendHeartbeat) {
+          dn.getMetrics().addHeartbeatTotal(
+              scheduler.monotonicNow() - startTime);
+        }
+
         // There is no work to do;  sleep until hearbeat timer elapses, 
         // or work arrives, and then iterate again.
         ibrManager.waitTillNextIBR(scheduler.getHeartbeatWaitTime());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2140d05/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
index aa518fb..085762b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
@@ -107,6 +107,7 @@ public class DataNodeMetrics {
   @Metric MutableRate copyBlockOp;
   @Metric MutableRate replaceBlockOp;
   @Metric MutableRate heartbeats;
+  @Metric MutableRate heartbeatsTotal;
   @Metric MutableRate lifelines;
   @Metric MutableRate blockReports;
   @Metric MutableRate incrementalBlockReports;
@@ -200,6 +201,10 @@ public class DataNodeMetrics {
     heartbeats.add(latency);
   }
 
+  public void addHeartbeatTotal(long latency) {
+    heartbeatsTotal.add(latency);
+  }
+
   public void addLifeline(long latency) {
     lifelines.add(latency);
   }


[32/34] hadoop git commit: HADOOP-12903. IPC Server should allow suppressing exception logging by type, not log 'server too busy' messages. (Arpit Agarwal)

Posted by ar...@apache.org.
HADOOP-12903. IPC Server should allow suppressing exception logging by type, not log 'server too busy' messages. (Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e040d31
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e040d31
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e040d31

Branch: refs/heads/HDFS-1312
Commit: 2e040d31c7bba021576e6baf267d937da7ff814a
Parents: 87c8005
Author: Arpit Agarwal <ar...@apache.org>
Authored: Tue Mar 8 23:29:43 2016 -0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Tue Mar 8 23:29:43 2016 -0800

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/ipc/Server.java | 134 ++++++++++++++-----
 .../java/org/apache/hadoop/ipc/TestServer.java  |  71 +++++++++-
 2 files changed, 163 insertions(+), 42 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e040d31/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 26043a7..1d92865 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -142,8 +142,22 @@ public abstract class Server {
   private ExceptionsHandler exceptionsHandler = new ExceptionsHandler();
   private Tracer tracer;
   
+  /**
+   * Add exception classes for which server won't log stack traces.
+   *
+   * @param exceptionClass exception classes
+   */
   public void addTerseExceptions(Class<?>... exceptionClass) {
-    exceptionsHandler.addTerseExceptions(exceptionClass);
+    exceptionsHandler.addTerseLoggingExceptions(exceptionClass);
+  }
+
+  /**
+   * Add exception classes which server won't log at all.
+   *
+   * @param exceptionClass exception classes
+   */
+  public void addSuppressedLoggingExceptions(Class<?>... exceptionClass) {
+    exceptionsHandler.addSuppressedLoggingExceptions(exceptionClass);
   }
 
   /**
@@ -151,29 +165,54 @@ public abstract class Server {
    * e.g., terse exception group for concise logging messages
    */
   static class ExceptionsHandler {
-    private volatile Set<String> terseExceptions = new HashSet<String>();
+    private volatile Set<String> terseExceptions = new HashSet<>();
+    private volatile Set<String> suppressedExceptions = new HashSet<>();
 
     /**
-     * Add exception class so server won't log its stack trace.
-     * Modifying the terseException through this method is thread safe.
-     *
+     * Add exception classes for which server won't log stack traces.
+     * Optimized for infrequent invocation.
      * @param exceptionClass exception classes 
      */
-    void addTerseExceptions(Class<?>... exceptionClass) {
+    void addTerseLoggingExceptions(Class<?>... exceptionClass) {
+      // Thread-safe replacement of terseExceptions.
+      terseExceptions = addExceptions(terseExceptions, exceptionClass);
+    }
+
+    /**
+     * Add exception classes which server won't log at all.
+     * Optimized for infrequent invocation.
+     * @param exceptionClass exception classes
+     */
+    void addSuppressedLoggingExceptions(Class<?>... exceptionClass) {
+      // Thread-safe replacement of suppressedExceptions.
+      suppressedExceptions = addExceptions(
+          suppressedExceptions, exceptionClass);
+    }
+
+    boolean isTerseLog(Class<?> t) {
+      return terseExceptions.contains(t.toString());
+    }
+
+    boolean isSuppressedLog(Class<?> t) {
+      return suppressedExceptions.contains(t.toString());
+    }
 
-      // Make a copy of terseException for performing modification
-      final HashSet<String> newSet = new HashSet<String>(terseExceptions);
+    /**
+     * Return a new set containing all the exceptions in exceptionsSet
+     * and exceptionClass.
+     * @return
+     */
+    private static Set<String> addExceptions(
+        final Set<String> exceptionsSet, Class<?>[] exceptionClass) {
+      // Make a copy of the exceptionSet for performing modification
+      final HashSet<String> newSet = new HashSet<>(exceptionsSet);
 
       // Add all class names into the HashSet
       for (Class<?> name : exceptionClass) {
         newSet.add(name.toString());
       }
-      // Replace terseException set
-      terseExceptions = Collections.unmodifiableSet(newSet);
-    }
 
-    boolean isTerse(Class<?> t) {
-      return terseExceptions.contains(t.toString());
+      return Collections.unmodifiableSet(newSet);
     }
   }
 
@@ -881,7 +920,7 @@ public abstract class Server {
     }
 
     void doRead(SelectionKey key) throws InterruptedException {
-      int count = 0;
+      int count;
       Connection c = (Connection)key.attachment();
       if (c == null) {
         return;  
@@ -894,13 +933,17 @@ public abstract class Server {
         LOG.info(Thread.currentThread().getName() + ": readAndProcess caught InterruptedException", ieo);
         throw ieo;
       } catch (Exception e) {
-        // a WrappedRpcServerException is an exception that has been sent
-        // to the client, so the stacktrace is unnecessary; any other
-        // exceptions are unexpected internal server errors and thus the
-        // stacktrace should be logged
-        LOG.info(Thread.currentThread().getName() + ": readAndProcess from client " +
-            c.getHostAddress() + " threw exception [" + e + "]",
-            (e instanceof WrappedRpcServerException) ? null : e);
+        // Do not log WrappedRpcServerExceptionSuppressed.
+        if (!(e instanceof WrappedRpcServerExceptionSuppressed)) {
+          // A WrappedRpcServerException is an exception that has been sent
+          // to the client, so the stacktrace is unnecessary; any other
+          // exceptions are unexpected internal server errors and thus the
+          // stacktrace should be logged.
+          LOG.info(Thread.currentThread().getName() +
+              ": readAndProcess from client " + c.getHostAddress() +
+              " threw exception [" + e + "]",
+              (e instanceof WrappedRpcServerException) ? null : e);
+        }
         count = -1; //so that the (count < 0) block is executed
       }
       if (count < 0) {
@@ -1243,6 +1286,18 @@ public abstract class Server {
     }
   }
 
+  /**
+   * A WrappedRpcServerException that is suppressed altogether
+   * for the purposes of logging.
+   */
+  private static class WrappedRpcServerExceptionSuppressed
+      extends WrappedRpcServerException {
+    public WrappedRpcServerExceptionSuppressed(
+        RpcErrorCodeProto errCode, IOException ioe) {
+      super(errCode, ioe);
+    }
+  }
+
   /** Reads calls from a connection and queues them for handling. */
   public class Connection {
     private boolean connectionHeaderRead = false; // connection  header is read?
@@ -2117,7 +2172,7 @@ public abstract class Server {
         rpcMetrics.incrClientBackoff();
         RetriableException retriableException =
             new RetriableException("Server is too busy.");
-        throw new WrappedRpcServerException(
+        throw new WrappedRpcServerExceptionSuppressed(
             RpcErrorCodeProto.ERROR_RPC_SERVER, retriableException);
       }
     }
@@ -2313,18 +2368,7 @@ public abstract class Server {
             if (e instanceof UndeclaredThrowableException) {
               e = e.getCause();
             }
-            String logMsg = Thread.currentThread().getName() + ", call " + call;
-            if (exceptionsHandler.isTerse(e.getClass())) {
-              // Don't log the whole stack trace. Way too noisy!
-              LOG.info(logMsg + ": " + e);
-            } else if (e instanceof RuntimeException || e instanceof Error) {
-              // These exception types indicate something is probably wrong
-              // on the server side, as opposed to just a normal exceptional
-              // result.
-              LOG.warn(logMsg, e);
-            } else {
-              LOG.info(logMsg, e);
-            }
+            logException(LOG, e, call);
             if (e instanceof RpcServerException) {
               RpcServerException rse = ((RpcServerException)e); 
               returnStatus = rse.getRpcStatusProto();
@@ -2377,6 +2421,26 @@ public abstract class Server {
     }
 
   }
+
+  @VisibleForTesting
+  void logException(Log logger, Throwable e, Call call) {
+    if (exceptionsHandler.isSuppressedLog(e.getClass())) {
+      return; // Log nothing.
+    }
+
+    final String logMsg = Thread.currentThread().getName() + ", call " + call;
+    if (exceptionsHandler.isTerseLog(e.getClass())) {
+      // Don't log the whole stack trace. Way too noisy!
+      logger.info(logMsg + ": " + e);
+    } else if (e instanceof RuntimeException || e instanceof Error) {
+      // These exception types indicate something is probably wrong
+      // on the server side, as opposed to just a normal exceptional
+      // result.
+      logger.warn(logMsg, e);
+    } else {
+      logger.info(logMsg, e);
+    }
+  }
   
   protected Server(String bindAddress, int port,
                   Class<? extends Writable> paramClass, int handlerCount, 
@@ -2482,7 +2546,7 @@ public abstract class Server {
       saslPropsResolver = SaslPropertiesResolver.getInstance(conf);
     }
     
-    this.exceptionsHandler.addTerseExceptions(StandbyException.class);
+    this.exceptionsHandler.addTerseLoggingExceptions(StandbyException.class);
   }
   
   private RpcSaslProto buildNegotiateResponse(List<AuthMethod> authMethods)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e040d31/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestServer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestServer.java
index 64dc4d4..afda535 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestServer.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestServer.java
@@ -19,13 +19,19 @@
 package org.apache.hadoop.ipc;
 
 import static org.junit.Assert.*;
+import static org.mockito.Matchers.*;
+import static org.mockito.Mockito.*;
 
 import java.io.IOException;
 import java.net.BindException;
 import java.net.InetSocketAddress;
 import java.net.ServerSocket;
 
+import org.apache.commons.logging.Log;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.ipc.Server.Call;
 import org.junit.Test;
 
 /**
@@ -117,15 +123,66 @@ public class TestServer {
     }
   }
   
+  static class TestException1 extends Exception {
+  }
+
+  static class TestException2 extends Exception {
+  }
+
+  static class TestException3 extends Exception {
+  }
+
+  @Test (timeout=300000)
+  public void testLogExceptions() throws Exception {
+    final Configuration conf = new Configuration();
+    final Call dummyCall = new Call(0, 0, null, null);
+    Log logger = mock(Log.class);
+    Server server = new Server("0.0.0.0", 0, LongWritable.class, 1, conf) {
+      @Override
+      public Writable call(
+          RPC.RpcKind rpcKind, String protocol, Writable param,
+          long receiveTime) throws Exception {
+        return null;
+      }
+    };
+    server.addSuppressedLoggingExceptions(TestException1.class);
+    server.addTerseExceptions(TestException2.class);
+
+    // Nothing should be logged for a suppressed exception.
+    server.logException(logger, new TestException1(), dummyCall);
+    verifyZeroInteractions(logger);
+
+    // No stack trace should be logged for a terse exception.
+    server.logException(logger, new TestException2(), dummyCall);
+    verify(logger, times(1)).info(anyObject());
+
+    // Full stack trace should be logged for other exceptions.
+    final Throwable te3 = new TestException3();
+    server.logException(logger, te3, dummyCall);
+    verify(logger, times(1)).info(anyObject(), eq(te3));
+  }
+
+  @Test
+  public void testExceptionsHandlerTerse() {
+    Server.ExceptionsHandler handler = new Server.ExceptionsHandler();
+    handler.addTerseLoggingExceptions(IOException.class);
+    handler.addTerseLoggingExceptions(RpcServerException.class, IpcException.class);
+
+    assertTrue(handler.isTerseLog(IOException.class));
+    assertTrue(handler.isTerseLog(RpcServerException.class));
+    assertTrue(handler.isTerseLog(IpcException.class));
+    assertFalse(handler.isTerseLog(RpcClientException.class));
+  }
+
   @Test
-  public void testExceptionsHandler() {
+  public void testExceptionsHandlerSuppressed() {
     Server.ExceptionsHandler handler = new Server.ExceptionsHandler();
-    handler.addTerseExceptions(IOException.class);
-    handler.addTerseExceptions(RpcServerException.class, IpcException.class);
+    handler.addSuppressedLoggingExceptions(IOException.class);
+    handler.addSuppressedLoggingExceptions(RpcServerException.class, IpcException.class);
 
-    assertTrue(handler.isTerse(IOException.class));
-    assertTrue(handler.isTerse(RpcServerException.class));
-    assertTrue(handler.isTerse(IpcException.class));
-    assertFalse(handler.isTerse(RpcClientException.class));
+    assertTrue(handler.isSuppressedLog(IOException.class));
+    assertTrue(handler.isSuppressedLog(RpcServerException.class));
+    assertTrue(handler.isSuppressedLog(IpcException.class));
+    assertFalse(handler.isSuppressedLog(RpcClientException.class));
   }
 }


[31/34] hadoop git commit: HADOOP-12688. Fix deadlinks in Compatibility.md. Contributed by Gabor Liptak.

Posted by ar...@apache.org.
HADOOP-12688. Fix deadlinks in Compatibility.md. Contributed by Gabor Liptak.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/87c8005a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/87c8005a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/87c8005a

Branch: refs/heads/HDFS-1312
Commit: 87c8005ad32347592962a7f61e8ad50671ff4bbf
Parents: 7600e3c
Author: Akira Ajisaka <aa...@apache.org>
Authored: Wed Mar 9 15:32:26 2016 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Wed Mar 9 15:35:51 2016 +0900

----------------------------------------------------------------------
 .../hadoop-common/src/site/markdown/Compatibility.md             | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/87c8005a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
index 55b9a9e..c275518 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
@@ -125,8 +125,8 @@ REST API compatibility corresponds to both the request (URLs) and responses to e
 * [WebHDFS](../hadoop-hdfs/WebHDFS.html) - Stable
 * [ResourceManager](../../hadoop-yarn/hadoop-yarn-site/ResourceManagerRest.html)
 * [NodeManager](../../hadoop-yarn/hadoop-yarn-site/NodeManagerRest.html)
-* [MR Application Master](../../hadoop-yarn/hadoop-yarn-site/MapredAppMasterRest.html)
-* [History Server](../../hadoop-yarn/hadoop-yarn-site/HistoryServerRest.html)
+* [MR Application Master](../../hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapredAppMasterRest.html)
+* [History Server](../../hadoop-mapreduce-client/hadoop-mapreduce-client-hs/HistoryServerRest.html)
 * [Timeline Server v1 REST API](../../hadoop-yarn/hadoop-yarn-site/TimelineServer.html)
 
 #### Policy


[24/34] hadoop git commit: HADOOP-12798. Update changelog and release notes (2016-03-04) (aw)

Posted by ar...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.7.1/CHANGES.2.7.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.7.1/CHANGES.2.7.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.7.1/CHANGES.2.7.1.md
index ed9100e..114b266 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.7.1/CHANGES.2.7.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.7.1/CHANGES.2.7.1.md
@@ -24,6 +24,13 @@
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |
 |:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-8226](https://issues.apache.org/jira/browse/HDFS-8226) | Non-HA rollback compatibility broken |  Blocker | . | J.Andreina | J.Andreina |
+
+
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
 
 
 ### NEW FEATURES:
@@ -67,10 +74,10 @@
 | [HADOOP-11663](https://issues.apache.org/jira/browse/HADOOP-11663) | Remove description about Java 6 from docs |  Minor | documentation | Masatake Iwasaki | Masatake Iwasaki |
 | [HADOOP-9658](https://issues.apache.org/jira/browse/HADOOP-9658) | SnappyCodec#checkNativeCodeLoaded may unexpectedly fail when native code is not loaded |  Major | . | Zhijie Shen | Zhijie Shen |
 | [HDFS-8681](https://issues.apache.org/jira/browse/HDFS-8681) | BlockScanner is incorrectly disabled by default |  Blocker | datanode | Andrew Wang | Arpit Agarwal |
-| [HDFS-8633](https://issues.apache.org/jira/browse/HDFS-8633) | Fix setting of dfs.datanode.readahead.bytes in hdfs-default.xml to match DFSConfigKeys |  Minor | HDFS | Ray Chiang | Ray Chiang |
-| [HDFS-8626](https://issues.apache.org/jira/browse/HDFS-8626) | Reserved RBW space is not released if creation of RBW File fails |  Blocker | . | kanaka kumar avvaru | kanaka kumar avvaru |
+| [HDFS-8633](https://issues.apache.org/jira/browse/HDFS-8633) | Fix setting of dfs.datanode.readahead.bytes in hdfs-default.xml to match DFSConfigKeys |  Minor | datanode | Ray Chiang | Ray Chiang |
+| [HDFS-8626](https://issues.apache.org/jira/browse/HDFS-8626) | Reserved RBW space is not released if creation of RBW File fails |  Blocker | . | Kanaka Kumar Avvaru | Kanaka Kumar Avvaru |
 | [HDFS-8600](https://issues.apache.org/jira/browse/HDFS-8600) | TestWebHdfsFileSystemContract.testGetFileBlockLocations fails in branch-2.7 |  Major | webhdfs | Arpit Agarwal | Arpit Agarwal |
-| [HDFS-8596](https://issues.apache.org/jira/browse/HDFS-8596) | TestDistributedFileSystem et al tests are broken in branch-2 due to incorrect setting of "datanode" attribute |  Blocker | HDFS | Yongjun Zhang | Yongjun Zhang |
+| [HDFS-8596](https://issues.apache.org/jira/browse/HDFS-8596) | TestDistributedFileSystem et al tests are broken in branch-2 due to incorrect setting of "datanode" attribute |  Blocker | datanode | Yongjun Zhang | Yongjun Zhang |
 | [HDFS-8595](https://issues.apache.org/jira/browse/HDFS-8595) | TestCommitBlockSynchronization fails in branch-2.7 |  Major | test | Arpit Agarwal | Arpit Agarwal |
 | [HDFS-8583](https://issues.apache.org/jira/browse/HDFS-8583) | Document that NFS gateway does not work with rpcbind on SLES 11 |  Major | documentation | Arpit Agarwal | Arpit Agarwal |
 | [HDFS-8576](https://issues.apache.org/jira/browse/HDFS-8576) |  Lease recovery should return true if the lease can be released and the file can be closed |  Major | namenode | J.Andreina | J.Andreina |
@@ -89,7 +96,6 @@
 | [HDFS-8270](https://issues.apache.org/jira/browse/HDFS-8270) | create() always retried with hardcoded timeout when file already exists with open lease |  Major | hdfs-client | Andrey Stepachev | J.Andreina |
 | [HDFS-8269](https://issues.apache.org/jira/browse/HDFS-8269) | getBlockLocations() does not resolve the .reserved path and generates incorrect edit logs when updating the atime |  Blocker | . | Yesha Vora | Haohui Mai |
 | [HDFS-8245](https://issues.apache.org/jira/browse/HDFS-8245) | Standby namenode doesn't process DELETED\_BLOCK if the add block request is in edit log. |  Major | . | Rushabh S Shah | Rushabh S Shah |
-| [HDFS-8226](https://issues.apache.org/jira/browse/HDFS-8226) | Non-HA rollback compatibility broken |  Blocker | . | J.Andreina | J.Andreina |
 | [HDFS-8213](https://issues.apache.org/jira/browse/HDFS-8213) | DFSClient should use hdfs.client.htrace HTrace configuration prefix rather than hadoop.htrace |  Critical | . | Billie Rinaldi | Colin Patrick McCabe |
 | [HDFS-8179](https://issues.apache.org/jira/browse/HDFS-8179) | DFSClient#getServerDefaults returns null within 1 hour of system start |  Blocker | . | Xiaoyu Yao | Xiaoyu Yao |
 | [HDFS-8163](https://issues.apache.org/jira/browse/HDFS-8163) | Using monotonicNow for block report scheduling causes test failures on recently restarted systems |  Blocker | datanode | Arpit Agarwal | Arpit Agarwal |
@@ -98,7 +104,7 @@
 | [HDFS-8149](https://issues.apache.org/jira/browse/HDFS-8149) | The footer of the Web UI "Hadoop, 2014" is old |  Major | . | Akira AJISAKA | Brahma Reddy Battula |
 | [HDFS-8147](https://issues.apache.org/jira/browse/HDFS-8147) | Mover should not schedule two replicas to the same DN storage |  Major | balancer & mover | Surendra Singh Lilhore | Surendra Singh Lilhore |
 | [HDFS-8127](https://issues.apache.org/jira/browse/HDFS-8127) | NameNode Failover during HA upgrade can cause DataNode to finalize upgrade |  Blocker | ha | Jing Zhao | Jing Zhao |
-| [HDFS-8091](https://issues.apache.org/jira/browse/HDFS-8091) | ACLStatus and XAttributes not properly presented to INodeAttributesProvider before returning to client |  Major | HDFS | Arun Suresh | Arun Suresh |
+| [HDFS-8091](https://issues.apache.org/jira/browse/HDFS-8091) | ACLStatus and XAttributes not properly presented to INodeAttributesProvider before returning to client |  Major | namenode | Arun Suresh | Arun Suresh |
 | [HDFS-8081](https://issues.apache.org/jira/browse/HDFS-8081) | Split getAdditionalBlock() into two methods. |  Major | . | Konstantin Shvachko | Konstantin Shvachko |
 | [HDFS-8070](https://issues.apache.org/jira/browse/HDFS-8070) | Pre-HDFS-7915 DFSClient cannot use short circuit on post-HDFS-7915 DataNode |  Blocker | caching | Gopal V | Colin Patrick McCabe |
 | [HDFS-7980](https://issues.apache.org/jira/browse/HDFS-7980) | Incremental BlockReport will dramatically slow down the startup of  a namenode |  Major | . | Hui Zheng | Walter Su |
@@ -181,6 +187,7 @@
 | [YARN-3609](https://issues.apache.org/jira/browse/YARN-3609) | Move load labels from storage from serviceInit to serviceStart to make it works with RM HA case. |  Major | resourcemanager | Wangda Tan | Wangda Tan |
 | [YARN-3544](https://issues.apache.org/jira/browse/YARN-3544) | AM logs link missing in the RM UI for a completed app |  Blocker | . | Hitesh Shah | Xuan Gong |
 | [YARN-3487](https://issues.apache.org/jira/browse/YARN-3487) | CapacityScheduler scheduler lock obtained unnecessarily when calling getQueue |  Critical | capacityscheduler | Jason Lowe | Jason Lowe |
+| [YARN-3301](https://issues.apache.org/jira/browse/YARN-3301) | Fix the format issue of the new RM web UI and AHS web UI after YARN-3272 / YARN-3262 |  Major | resourcemanager | Xuan Gong | Xuan Gong |
 | [YARN-3006](https://issues.apache.org/jira/browse/YARN-3006) | Improve the error message when attempting manual failover with auto-failover enabled |  Minor | . | Akira AJISAKA | Akira AJISAKA |
 | [YARN-2918](https://issues.apache.org/jira/browse/YARN-2918) | Don't fail RM if queue's configured labels are not existed in cluster-node-labels |  Major | resourcemanager | Rohith Sharma K S | Wangda Tan |
 | [YARN-2900](https://issues.apache.org/jira/browse/YARN-2900) | Application (Attempt and Container) Not Found in AHS results in Internal Server Error (500) |  Major | timelineserver | Jonathan Eagles | Mit Desai |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.7.1/RELEASENOTES.2.7.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.7.1/RELEASENOTES.2.7.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.7.1/RELEASENOTES.2.7.1.md
index 2b62c13..1645e08 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.7.1/RELEASENOTES.2.7.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.7.1/RELEASENOTES.2.7.1.md
@@ -18,16 +18,17 @@
 -->
 # Apache Hadoop  2.7.1 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---
 
 * [HDFS-8486](https://issues.apache.org/jira/browse/HDFS-8486) | *Blocker* | **DN startup may cause severe data loss**
 
+<!-- markdown -->
 Public service notice:
-- Every restart of a 2.6.x or 2.7.0 DN incurs a risk of unwanted block deletion.
-- Apply this patch if you are running a pre-2.7.1 release.
+* Every restart of a 2.6.x or 2.7.0 DN incurs a risk of unwanted block deletion.
+* Apply this patch if you are running a pre-2.7.1 release.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.7.2/CHANGES.2.7.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.7.2/CHANGES.2.7.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.7.2/CHANGES.2.7.2.md
new file mode 100644
index 0000000..2ece330
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.7.2/CHANGES.2.7.2.md
@@ -0,0 +1,224 @@
+
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+# Apache Hadoop Changelog
+
+## Release 2.7.2 - 2016-01-25
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-7645](https://issues.apache.org/jira/browse/HDFS-7645) | Rolling upgrade is restoring blocks from trash multiple times |  Major | datanode | Nathan Roberts | Keisuke Ogiwara |
+
+
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
+### NEW FEATURES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-12825](https://issues.apache.org/jira/browse/HADOOP-12825) | Log slow name resolutions |  Major | . | Sidharta Seethana | Sidharta Seethana |
+| [HADOOP-12413](https://issues.apache.org/jira/browse/HADOOP-12413) | AccessControlList should avoid calling getGroupNames in isUserInList with empty groups. |  Major | security | zhihai xu | zhihai xu |
+| [HADOOP-12280](https://issues.apache.org/jira/browse/HADOOP-12280) | Skip unit tests based on maven profile rather than NativeCodeLoader.isNativeCodeLoaded |  Minor | test | Masatake Iwasaki | Masatake Iwasaki |
+| [HADOOP-12232](https://issues.apache.org/jira/browse/HADOOP-12232) | Upgrade Tomcat dependency to 6.0.44. |  Major | build | Chris Nauroth | Chris Nauroth |
+| [HADOOP-11812](https://issues.apache.org/jira/browse/HADOOP-11812) | Implement listLocatedStatus for ViewFileSystem to speed up split calculation |  Blocker | fs | Gera Shegalov | Gera Shegalov |
+| [HADOOP-7139](https://issues.apache.org/jira/browse/HADOOP-7139) | Allow appending to existing SequenceFiles |  Major | io | Stephen Rose | Kanaka Kumar Avvaru |
+| [HADOOP-5323](https://issues.apache.org/jira/browse/HADOOP-5323) | Trash documentation should describe its directory structure and configurations |  Minor | documentation | Suman Sehgal | Weiwei Yang |
+| [HDFS-9434](https://issues.apache.org/jira/browse/HDFS-9434) | Recommission a datanode with 500k blocks may pause NN for 30 seconds |  Major | namenode | Tsz Wo Nicholas Sze | Tsz Wo Nicholas Sze |
+| [HDFS-9221](https://issues.apache.org/jira/browse/HDFS-9221) | HdfsServerConstants#ReplicaState#getState should avoid calling values() since it creates a temporary array |  Major | performance | Staffan Friberg | Staffan Friberg |
+| [HDFS-8722](https://issues.apache.org/jira/browse/HDFS-8722) | Optimize datanode writes for small writes and flushes |  Critical | . | Kihwal Lee | Kihwal Lee |
+| [HDFS-8659](https://issues.apache.org/jira/browse/HDFS-8659) | Block scanner INFO message is spamming logs |  Major | datanode | Yongjun Zhang | Yongjun Zhang |
+| [HDFS-8384](https://issues.apache.org/jira/browse/HDFS-8384) | Allow NN to startup if there are files having a lease but are not under construction |  Minor | namenode | Tsz Wo Nicholas Sze | Jing Zhao |
+| [HDFS-7314](https://issues.apache.org/jira/browse/HDFS-7314) | When the DFSClient lease cannot be renewed, abort open-for-write files rather than the entire DFSClient |  Major | . | Ming Ma | Ming Ma |
+| [YARN-4158](https://issues.apache.org/jira/browse/YARN-4158) | Remove duplicate close for LogWriter in AppLogAggregatorImpl#uploadLogsForContainers |  Minor | nodemanager | zhihai xu | zhihai xu |
+| [YARN-3978](https://issues.apache.org/jira/browse/YARN-3978) | Configurably turn off the saving of container info in Generic AHS |  Major | timelineserver, yarn | Eric Payne | Eric Payne |
+| [YARN-3727](https://issues.apache.org/jira/browse/YARN-3727) | For better error recovery, check if the directory exists before using it for localization. |  Major | nodemanager | zhihai xu | zhihai xu |
+| [YARN-3248](https://issues.apache.org/jira/browse/YARN-3248) | Display count of nodes blacklisted by apps in the web UI |  Major | capacityscheduler, resourcemanager | Varun Vasudev | Varun Vasudev |
+| [YARN-3170](https://issues.apache.org/jira/browse/YARN-3170) | YARN architecture document needs updating |  Major | documentation | Allen Wittenauer | Brahma Reddy Battula |
+
+
+### BUG FIXES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-12577](https://issues.apache.org/jira/browse/HADOOP-12577) | Bump up commons-collections version to 3.2.2 to address a security flaw |  Blocker | build, security | Wei-Chiu Chuang | Wei-Chiu Chuang |
+| [HADOOP-12526](https://issues.apache.org/jira/browse/HADOOP-12526) | [Branch-2] there are duplicate dependency definitions in pom's |  Major | build | Sangjin Lee | Sangjin Lee |
+| [HADOOP-12465](https://issues.apache.org/jira/browse/HADOOP-12465) | Incorrect javadoc in WritableUtils.java |  Minor | documentation | Martin Petricek | Jagadesh Kiran N |
+| [HADOOP-12464](https://issues.apache.org/jira/browse/HADOOP-12464) | Interrupted client may try to fail-over and retry |  Major | ipc | Kihwal Lee | Kihwal Lee |
+| [HADOOP-12451](https://issues.apache.org/jira/browse/HADOOP-12451) | [Branch-2] Setting HADOOP\_HOME explicitly should be allowed |  Blocker | scripts | Karthik Kambatla | Karthik Kambatla |
+| [HADOOP-12415](https://issues.apache.org/jira/browse/HADOOP-12415) | hdfs and nfs builds broken on -missing compile-time dependency on netty |  Major | nfs | Konstantin Boudnik | Tom Zeng |
+| [HADOOP-12359](https://issues.apache.org/jira/browse/HADOOP-12359) | hadoop fs -getmerge doc is wrong |  Major | documentation | Daniel Templeton | Jagadesh Kiran N |
+| [HADOOP-12304](https://issues.apache.org/jira/browse/HADOOP-12304) | Applications using FileContext fail with the default file system configured to be wasb/s3/etc. |  Blocker | fs | Chris Nauroth | Chris Nauroth |
+| [HADOOP-12230](https://issues.apache.org/jira/browse/HADOOP-12230) | hadoop-project declares duplicate, conflicting curator dependencies |  Minor | build | Steve Loughran | Rakesh R |
+| [HADOOP-12213](https://issues.apache.org/jira/browse/HADOOP-12213) | Interrupted exception can occur when Client#stop is called |  Minor | . | Oleg Zhurakousky | Kuhu Shukla |
+| [HADOOP-12191](https://issues.apache.org/jira/browse/HADOOP-12191) | Bzip2Factory is not thread safe |  Major | io | Jason Lowe | Brahma Reddy Battula |
+| [HADOOP-12186](https://issues.apache.org/jira/browse/HADOOP-12186) | ActiveStandbyElector shouldn't call monitorLockNodeAsync multiple times |  Major | ha | zhihai xu | zhihai xu |
+| [HADOOP-12006](https://issues.apache.org/jira/browse/HADOOP-12006) | Remove unimplemented option for `hadoop fs -ls` from document in branch-2.7 |  Major | . | Akira AJISAKA | Akira AJISAKA |
+| [HADOOP-11932](https://issues.apache.org/jira/browse/HADOOP-11932) |  MetricsSinkAdapter hangs when being stopped |  Critical | . | Jian He | Brahma Reddy Battula |
+| [HADOOP-11491](https://issues.apache.org/jira/browse/HADOOP-11491) | HarFs incorrectly declared as requiring an authority |  Critical | fs | Gera Shegalov | Brahma Reddy Battula |
+| [HADOOP-10365](https://issues.apache.org/jira/browse/HADOOP-10365) | BufferedOutputStream in FileUtil#unpackEntries() should be closed in finally block |  Minor | util | Ted Yu | Kiran Kumar M R |
+| [HADOOP-9242](https://issues.apache.org/jira/browse/HADOOP-9242) | Duplicate surefire plugin config in hadoop-common |  Major | test | Andrey Klochkov | Andrey Klochkov |
+| [HADOOP-8151](https://issues.apache.org/jira/browse/HADOOP-8151) | Error handling in snappy decompressor throws invalid exceptions |  Major | io, native | Todd Lipcon | Matt Foley |
+| [HDFS-9574](https://issues.apache.org/jira/browse/HDFS-9574) | Reduce client failures during datanode restart |  Major | . | Kihwal Lee | Kihwal Lee |
+| [HDFS-9470](https://issues.apache.org/jira/browse/HDFS-9470) | Encryption zone on root not loaded from fsimage after NN restart |  Critical | . | Xiao Chen | Xiao Chen |
+| [HDFS-9445](https://issues.apache.org/jira/browse/HDFS-9445) | Datanode may deadlock while handling a bad volume |  Blocker | . | Kihwal Lee | Walter Su |
+| [HDFS-9431](https://issues.apache.org/jira/browse/HDFS-9431) | DistributedFileSystem#concat fails if the target path is relative. |  Major | hdfs-client | Kazuho Fujii | Kazuho Fujii |
+| [HDFS-9426](https://issues.apache.org/jira/browse/HDFS-9426) | Rollingupgrade finalization is not backward compatible |  Blocker | . | Kihwal Lee | Kihwal Lee |
+| [HDFS-9413](https://issues.apache.org/jira/browse/HDFS-9413) | getContentSummary() on standby should throw StandbyException |  Critical | . | Brahma Reddy Battula | Brahma Reddy Battula |
+| [HDFS-9317](https://issues.apache.org/jira/browse/HDFS-9317) | Document fsck -blockId and -storagepolicy options in branch-2.7 |  Major | documentation | Akira AJISAKA | Akira AJISAKA |
+| [HDFS-9305](https://issues.apache.org/jira/browse/HDFS-9305) | Delayed heartbeat processing causes storm of subsequent heartbeats |  Major | datanode | Chris Nauroth | Arpit Agarwal |
+| [HDFS-9294](https://issues.apache.org/jira/browse/HDFS-9294) | DFSClient  deadlock when close file and failed to renew lease |  Blocker | hdfs-client | DENG FEI | Brahma Reddy Battula |
+| [HDFS-9290](https://issues.apache.org/jira/browse/HDFS-9290) | DFSClient#callAppend() is not backward compatible for slightly older NameNodes |  Blocker | . | Tony Wu | Tony Wu |
+| [HDFS-9289](https://issues.apache.org/jira/browse/HDFS-9289) | Make DataStreamer#block thread safe and verify genStamp in commitBlock |  Critical | . | Chang Li | Chang Li |
+| [HDFS-9273](https://issues.apache.org/jira/browse/HDFS-9273) | ACLs on root directory may be lost after NN restart |  Critical | namenode | Xiao Chen | Xiao Chen |
+| [HDFS-9220](https://issues.apache.org/jira/browse/HDFS-9220) | Reading small file (\< 512 bytes) that is open for append fails due to incorrect checksum |  Blocker | . | Bogdan Raducanu | Jing Zhao |
+| [HDFS-9178](https://issues.apache.org/jira/browse/HDFS-9178) | Slow datanode I/O can cause a wrong node to be marked bad |  Critical | . | Kihwal Lee | Kihwal Lee |
+| [HDFS-9106](https://issues.apache.org/jira/browse/HDFS-9106) | Transfer failure during pipeline recovery causes permanent write failures |  Critical | . | Kihwal Lee | Kihwal Lee |
+| [HDFS-9083](https://issues.apache.org/jira/browse/HDFS-9083) | Replication violates block placement policy. |  Blocker | namenode | Rushabh S Shah | Rushabh S Shah |
+| [HDFS-9043](https://issues.apache.org/jira/browse/HDFS-9043) | Doc updation for commands in HDFS Federation |  Minor | documentation | J.Andreina | J.Andreina |
+| [HDFS-9042](https://issues.apache.org/jira/browse/HDFS-9042) | Update document for the Storage policy name |  Minor | documentation | J.Andreina | J.Andreina |
+| [HDFS-9033](https://issues.apache.org/jira/browse/HDFS-9033) | dfsadmin -metasave prints "NaN" for cache used% |  Major | . | Archana T | Brahma Reddy Battula |
+| [HDFS-8995](https://issues.apache.org/jira/browse/HDFS-8995) | Flaw in registration bookeeping can make DN die on reconnect |  Critical | . | Kihwal Lee | Kihwal Lee |
+| [HDFS-8950](https://issues.apache.org/jira/browse/HDFS-8950) | NameNode refresh doesn't remove DataNodes that are no longer in the allowed list |  Major | datanode, namenode | Daniel Templeton | Daniel Templeton |
+| [HDFS-8891](https://issues.apache.org/jira/browse/HDFS-8891) | HDFS concat should keep srcs order |  Blocker | . | Yong Zhang | Yong Zhang |
+| [HDFS-8879](https://issues.apache.org/jira/browse/HDFS-8879) | Quota by storage type usage incorrectly initialized upon namenode restart |  Major | namenode | Kihwal Lee | Xiaoyu Yao |
+| [HDFS-8867](https://issues.apache.org/jira/browse/HDFS-8867) | Enable optimized block reports |  Major | . | Rushabh S Shah | Daryn Sharp |
+| [HDFS-8863](https://issues.apache.org/jira/browse/HDFS-8863) | The remaining space check in BlockPlacementPolicyDefault is flawed |  Critical | . | Kihwal Lee | Kihwal Lee |
+| [HDFS-8852](https://issues.apache.org/jira/browse/HDFS-8852) | HDFS architecture documentation of version 2.x is outdated about append write support |  Major | documentation | Hong Dai Thanh | Ajith S |
+| [HDFS-8850](https://issues.apache.org/jira/browse/HDFS-8850) | VolumeScanner thread exits with exception if there is no block pool to be scanned but there are suspicious blocks |  Major | datanode | Colin Patrick McCabe | Colin Patrick McCabe |
+| [HDFS-8846](https://issues.apache.org/jira/browse/HDFS-8846) | Add a unit test for INotify functionality across a layout version upgrade |  Major | namenode | Zhe Zhang | Zhe Zhang |
+| [HDFS-8806](https://issues.apache.org/jira/browse/HDFS-8806) | Inconsistent metrics: number of missing blocks with replication factor 1 not properly cleared |  Major | . | Zhe Zhang | Zhe Zhang |
+| [HDFS-8767](https://issues.apache.org/jira/browse/HDFS-8767) | RawLocalFileSystem.listStatus() returns null for UNIX pipefile |  Critical | . | Haohui Mai | Kanaka Kumar Avvaru |
+| [HDFS-8676](https://issues.apache.org/jira/browse/HDFS-8676) | Delayed rolling upgrade finalization can cause heartbeat expiration and write failures |  Critical | . | Kihwal Lee | Walter Su |
+| [HDFS-8656](https://issues.apache.org/jira/browse/HDFS-8656) | Preserve compatibility of ClientProtocol#rollingUpgrade after finalization |  Critical | rolling upgrades | Andrew Wang | Andrew Wang |
+| [HDFS-8615](https://issues.apache.org/jira/browse/HDFS-8615) | Correct HTTP method in WebHDFS document |  Major | documentation | Akira AJISAKA | Brahma Reddy Battula |
+| [HDFS-8431](https://issues.apache.org/jira/browse/HDFS-8431) | hdfs crypto class not found in Windows |  Critical | scripts | Sumana Sathish | Anu Engineer |
+| [HDFS-8219](https://issues.apache.org/jira/browse/HDFS-8219) | setStoragePolicy with folder behavior is different after cluster restart |  Major | . | Peter Shi | Surendra Singh Lilhore |
+| [HDFS-8099](https://issues.apache.org/jira/browse/HDFS-8099) | Change "DFSInputStream has been closed already" message to debug log level |  Minor | hdfs-client | Charles Lamb | Charles Lamb |
+| [HDFS-8046](https://issues.apache.org/jira/browse/HDFS-8046) | Allow better control of getContentSummary |  Major | . | Kihwal Lee | Kihwal Lee |
+| [HDFS-7725](https://issues.apache.org/jira/browse/HDFS-7725) | Incorrect "nodes in service" metrics caused all writes to fail |  Major | . | Ming Ma | Ming Ma |
+| [HDFS-7609](https://issues.apache.org/jira/browse/HDFS-7609) | Avoid retry cache collision when Standby NameNode loading edits |  Critical | namenode | Carrey Zhan | Ming Ma |
+| [HDFS-6945](https://issues.apache.org/jira/browse/HDFS-6945) | BlockManager should remove a block from excessReplicateMap and decrement ExcessBlocks metric when the block is removed |  Critical | namenode | Akira AJISAKA | Akira AJISAKA |
+| [MAPREDUCE-6549](https://issues.apache.org/jira/browse/MAPREDUCE-6549) | multibyte delimiters with LineRecordReader cause duplicate records |  Major | mrv1, mrv2 | Dustin Cote | Wilfred Spiegelenburg |
+| [MAPREDUCE-6540](https://issues.apache.org/jira/browse/MAPREDUCE-6540) | TestMRTimelineEventHandling fails |  Major | test | Sangjin Lee | Sangjin Lee |
+| [MAPREDUCE-6528](https://issues.apache.org/jira/browse/MAPREDUCE-6528) | Memory leak for HistoryFileManager.getJobSummary() |  Critical | jobhistoryserver | Junping Du | Junping Du |
+| [MAPREDUCE-6518](https://issues.apache.org/jira/browse/MAPREDUCE-6518) | Set SO\_KEEPALIVE on shuffle connections |  Major | mrv2, nodemanager | Nathan Roberts | Chang Li |
+| [MAPREDUCE-6497](https://issues.apache.org/jira/browse/MAPREDUCE-6497) | Fix wrong value of JOB\_FINISHED event in JobHistoryEventHandler |  Major | . | Shinichi Yamashita | Shinichi Yamashita |
+| [MAPREDUCE-6492](https://issues.apache.org/jira/browse/MAPREDUCE-6492) | AsyncDispatcher exit with NPE on TaskAttemptImpl#sendJHStartEventForAssignedFailTask |  Critical | . | Bibin A Chundatt | Bibin A Chundatt |
+| [MAPREDUCE-6481](https://issues.apache.org/jira/browse/MAPREDUCE-6481) | LineRecordReader may give incomplete record and wrong position/key information for uncompressed input sometimes. |  Critical | mrv2 | zhihai xu | zhihai xu |
+| [MAPREDUCE-6474](https://issues.apache.org/jira/browse/MAPREDUCE-6474) | ShuffleHandler can possibly exhaust nodemanager file descriptors |  Major | mrv2, nodemanager | Nathan Roberts | Kuhu Shukla |
+| [MAPREDUCE-6472](https://issues.apache.org/jira/browse/MAPREDUCE-6472) | MapReduce AM should have java.io.tmpdir=./tmp to be consistent with tasks |  Major | mr-am | Jason Lowe | Naganarasimha G R |
+| [MAPREDUCE-6454](https://issues.apache.org/jira/browse/MAPREDUCE-6454) | MapReduce doesn't set the HADOOP\_CLASSPATH for jar lib in distributed cache. |  Critical | . | Junping Du | Junping Du |
+| [MAPREDUCE-6451](https://issues.apache.org/jira/browse/MAPREDUCE-6451) | DistCp has incorrect chunkFilePath for multiple jobs when strategy is dynamic |  Major | distcp | Kuhu Shukla | Kuhu Shukla |
+| [MAPREDUCE-6442](https://issues.apache.org/jira/browse/MAPREDUCE-6442) | Stack trace is missing when error occurs in client protocol provider's constructor |  Major | client | Chang Li | Chang Li |
+| [MAPREDUCE-6439](https://issues.apache.org/jira/browse/MAPREDUCE-6439) | AM may fail instead of retrying if RM shuts down during the allocate call |  Critical | . | Anubhav Dhoot | Anubhav Dhoot |
+| [MAPREDUCE-6426](https://issues.apache.org/jira/browse/MAPREDUCE-6426) | TestShuffleHandler#testGetMapOutputInfo is failing |  Major | test | Devaraj K | zhihai xu |
+| [MAPREDUCE-6425](https://issues.apache.org/jira/browse/MAPREDUCE-6425) | ShuffleHandler passes wrong "base" parameter to getMapOutputInfo if mapId is not in the cache. |  Major | mrv2, nodemanager | zhihai xu | zhihai xu |
+| [MAPREDUCE-6377](https://issues.apache.org/jira/browse/MAPREDUCE-6377) | JHS sorting on state column not working in webUi |  Minor | jobhistoryserver | Bibin A Chundatt | zhihai xu |
+| [MAPREDUCE-6273](https://issues.apache.org/jira/browse/MAPREDUCE-6273) | HistoryFileManager should check whether summaryFile exists to avoid FileNotFoundException causing HistoryFileInfo into MOVE\_FAILED state |  Minor | jobhistoryserver | zhihai xu | zhihai xu |
+| [MAPREDUCE-5982](https://issues.apache.org/jira/browse/MAPREDUCE-5982) | Task attempts that fail from the ASSIGNED state can disappear |  Major | mr-am | Jason Lowe | Chang Li |
+| [MAPREDUCE-5948](https://issues.apache.org/jira/browse/MAPREDUCE-5948) | org.apache.hadoop.mapred.LineRecordReader does not handle multibyte record delimiters well |  Critical | . | Kris Geusebroek | Akira AJISAKA |
+| [MAPREDUCE-5883](https://issues.apache.org/jira/browse/MAPREDUCE-5883) | "Total megabyte-seconds" in job counters is slightly misleading |  Minor | . | Nathan Roberts | Nathan Roberts |
+| [MAPREDUCE-5649](https://issues.apache.org/jira/browse/MAPREDUCE-5649) | Reduce cannot use more than 2G memory  for the final merge |  Major | mrv2 | stanley shi | Gera Shegalov |
+| [YARN-4434](https://issues.apache.org/jira/browse/YARN-4434) | NodeManager Disk Checker parameter documentation is not correct |  Minor | documentation, nodemanager | Takashi Ohnishi | Weiwei Yang |
+| [YARN-4424](https://issues.apache.org/jira/browse/YARN-4424) | Fix deadlock in RMAppImpl |  Blocker | . | Yesha Vora | Jian He |
+| [YARN-4365](https://issues.apache.org/jira/browse/YARN-4365) | FileSystemNodeLabelStore should check for root dir existence on startup |  Major | resourcemanager | Jason Lowe | Kuhu Shukla |
+| [YARN-4354](https://issues.apache.org/jira/browse/YARN-4354) | Public resource localization fails with NPE |  Blocker | nodemanager | Jason Lowe | Jason Lowe |
+| [YARN-4348](https://issues.apache.org/jira/browse/YARN-4348) | ZKRMStateStore.syncInternal shouldn't wait for sync completion for avoiding blocking ZK's event thread |  Blocker | . | Tsuyoshi Ozawa | Tsuyoshi Ozawa |
+| [YARN-4344](https://issues.apache.org/jira/browse/YARN-4344) | NMs reconnecting with changed capabilities can lead to wrong cluster resource calculations |  Critical | resourcemanager | Varun Vasudev | Varun Vasudev |
+| [YARN-4326](https://issues.apache.org/jira/browse/YARN-4326) | Fix TestDistributedShell timeout as AHS in MiniYarnCluster no longer binds to default port 8188 |  Major | . | MENG DING | MENG DING |
+| [YARN-4321](https://issues.apache.org/jira/browse/YARN-4321) | Incessant retries if NoAuthException is thrown by Zookeeper in non HA mode |  Major | resourcemanager | Varun Saxena | Varun Saxena |
+| [YARN-4320](https://issues.apache.org/jira/browse/YARN-4320) | TestJobHistoryEventHandler fails as AHS in MiniYarnCluster no longer binds to default port 8188 |  Major | . | Varun Saxena | Varun Saxena |
+| [YARN-4313](https://issues.apache.org/jira/browse/YARN-4313) | Race condition in MiniMRYarnCluster when getting history server address |  Major | . | Jian He | Jian He |
+| [YARN-4312](https://issues.apache.org/jira/browse/YARN-4312) | TestSubmitApplicationWithRMHA fails on branch-2.7 and branch-2.6 as some of the test cases time out |  Major | . | Varun Saxena | Varun Saxena |
+| [YARN-4281](https://issues.apache.org/jira/browse/YARN-4281) | 2.7 RM app page is broken |  Blocker | . | Chang Li | Chang Li |
+| [YARN-4241](https://issues.apache.org/jira/browse/YARN-4241) | Fix typo of property name in yarn-default.xml |  Major | documentation | Anthony Rojas | Anthony Rojas |
+| [YARN-4209](https://issues.apache.org/jira/browse/YARN-4209) | RMStateStore FENCED state doesn’t work due to updateFencedState called by stateMachine.doTransition |  Critical | resourcemanager | zhihai xu | zhihai xu |
+| [YARN-4180](https://issues.apache.org/jira/browse/YARN-4180) | AMLauncher does not retry on failures when talking to NM |  Critical | resourcemanager | Anubhav Dhoot | Anubhav Dhoot |
+| [YARN-4127](https://issues.apache.org/jira/browse/YARN-4127) | RM fail with noAuth error if switched from failover mode to non-failover mode |  Major | resourcemanager | Jian He | Varun Saxena |
+| [YARN-4105](https://issues.apache.org/jira/browse/YARN-4105) | Capacity Scheduler headroom for DRF is wrong |  Major | capacityscheduler | Chang Li | Chang Li |
+| [YARN-4103](https://issues.apache.org/jira/browse/YARN-4103) | RM WebServices missing scheme for appattempts logLinks |  Major | . | Jonathan Eagles | Jonathan Eagles |
+| [YARN-4096](https://issues.apache.org/jira/browse/YARN-4096) | App local logs are leaked if log aggregation fails to initialize for the app |  Major | log-aggregation, nodemanager | Jason Lowe | Jason Lowe |
+| [YARN-4087](https://issues.apache.org/jira/browse/YARN-4087) | Followup fixes after YARN-2019 regarding RM behavior when state-store error occurs |  Major | . | Jian He | Jian He |
+| [YARN-4047](https://issues.apache.org/jira/browse/YARN-4047) | ClientRMService getApplications has high scheduler lock contention |  Major | resourcemanager | Jason Lowe | Jason Lowe |
+| [YARN-4041](https://issues.apache.org/jira/browse/YARN-4041) | Slow delegation token renewal can severely prolong RM recovery |  Major | resourcemanager | Jason Lowe | Sunil G |
+| [YARN-4009](https://issues.apache.org/jira/browse/YARN-4009) | CORS support for ResourceManager REST API |  Major | . | Prakash Ramachandran | Varun Vasudev |
+| [YARN-4005](https://issues.apache.org/jira/browse/YARN-4005) | Completed container whose app is finished is not removed from NMStateStore |  Major | . | Jun Gong | Jun Gong |
+| [YARN-4000](https://issues.apache.org/jira/browse/YARN-4000) | RM crashes with NPE if leaf queue becomes parent queue during restart |  Major | capacityscheduler, resourcemanager | Jason Lowe | Varun Saxena |
+| [YARN-3999](https://issues.apache.org/jira/browse/YARN-3999) | RM hangs on draining events |  Major | . | Jian He | Jian He |
+| [YARN-3990](https://issues.apache.org/jira/browse/YARN-3990) | AsyncDispatcher may overloaded with RMAppNodeUpdateEvent when Node is connected/disconnected |  Critical | resourcemanager | Rohith Sharma K S | Bibin A Chundatt |
+| [YARN-3975](https://issues.apache.org/jira/browse/YARN-3975) | WebAppProxyServlet should not redirect to RM page if AHS is enabled |  Major | . | Mit Desai | Mit Desai |
+| [YARN-3967](https://issues.apache.org/jira/browse/YARN-3967) | Fetch the application report from the AHS if the RM does not know about it |  Major | . | Mit Desai | Mit Desai |
+| [YARN-3925](https://issues.apache.org/jira/browse/YARN-3925) | ContainerLogsUtils#getContainerLogFile fails to read container log files from full disks. |  Critical | nodemanager | zhihai xu | zhihai xu |
+| [YARN-3905](https://issues.apache.org/jira/browse/YARN-3905) | Application History Server UI NPEs when accessing apps run after RM restart |  Major | timelineserver | Eric Payne | Eric Payne |
+| [YARN-3896](https://issues.apache.org/jira/browse/YARN-3896) | RMNode transitioned from RUNNING to REBOOTED because its response id had not been reset synchronously |  Major | resourcemanager | Jun Gong | Jun Gong |
+| [YARN-3878](https://issues.apache.org/jira/browse/YARN-3878) | AsyncDispatcher can hang while stopping if it is configured for draining events on stop |  Critical | . | Varun Saxena | Varun Saxena |
+| [YARN-3857](https://issues.apache.org/jira/browse/YARN-3857) | Memory leak in ResourceManager with SIMPLE mode |  Critical | resourcemanager | mujunchao | mujunchao |
+| [YARN-3802](https://issues.apache.org/jira/browse/YARN-3802) | Two RMNodes for the same NodeId are used in RM sometimes after NM is reconnected. |  Major | resourcemanager | zhihai xu | zhihai xu |
+| [YARN-3798](https://issues.apache.org/jira/browse/YARN-3798) | ZKRMStateStore shouldn't create new session without occurrance of SESSIONEXPIED |  Blocker | resourcemanager | Bibin A Chundatt | Varun Saxena |
+| [YARN-3793](https://issues.apache.org/jira/browse/YARN-3793) | Several NPEs when deleting local files on NM recovery |  Major | nodemanager | Karthik Kambatla | Varun Saxena |
+| [YARN-3780](https://issues.apache.org/jira/browse/YARN-3780) | Should use equals when compare Resource in RMNodeImpl#ReconnectNodeTransition |  Minor | resourcemanager | zhihai xu | zhihai xu |
+| [YARN-3697](https://issues.apache.org/jira/browse/YARN-3697) | FairScheduler: ContinuousSchedulingThread can fail to shutdown |  Critical | fairscheduler | zhihai xu | zhihai xu |
+| [YARN-3690](https://issues.apache.org/jira/browse/YARN-3690) | [JDK8] 'mvn site' fails |  Major | api, site | Akira AJISAKA | Brahma Reddy Battula |
+| [YARN-3624](https://issues.apache.org/jira/browse/YARN-3624) | ApplicationHistoryServer reverses the order of the filters it gets |  Major | timelineserver | Mit Desai | Mit Desai |
+| [YARN-3619](https://issues.apache.org/jira/browse/YARN-3619) | ContainerMetrics unregisters during getMetrics and leads to ConcurrentModificationException |  Major | nodemanager | Jason Lowe | zhihai xu |
+| [YARN-3535](https://issues.apache.org/jira/browse/YARN-3535) | Scheduler must re-request container resources when RMContainer transitions from ALLOCATED to KILLED |  Critical | capacityscheduler, fairscheduler, resourcemanager | Peng Zhang | Peng Zhang |
+| [YARN-3508](https://issues.apache.org/jira/browse/YARN-3508) | Prevent processing preemption events on the main RM dispatcher |  Major | resourcemanager, scheduler | Jason Lowe | Varun Saxena |
+| [YARN-2890](https://issues.apache.org/jira/browse/YARN-2890) | MiniYarnCluster should turn on timeline service if configured to do so |  Major | . | Mit Desai | Mit Desai |
+| [YARN-2859](https://issues.apache.org/jira/browse/YARN-2859) | ApplicationHistoryServer binds to default port 8188 in MiniYARNCluster |  Critical | timelineserver | Hitesh Shah | Vinod Kumar Vavilapalli |
+| [YARN-2019](https://issues.apache.org/jira/browse/YARN-2019) | Retrospect on decision of making RM crashed if any exception throw in ZKRMStateStore |  Critical | . | Junping Du | Jian He |
+
+
+### TESTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [YARN-3580](https://issues.apache.org/jira/browse/YARN-3580) | [JDK 8] TestClientRMService.testGetLabelsToNodes fails |  Major | test | Robert Kanter | Robert Kanter |
+
+
+### SUB-TASKS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [YARN-4101](https://issues.apache.org/jira/browse/YARN-4101) | RM should print alert messages if Zookeeper and Resourcemanager gets connection issue |  Critical | yarn | Yesha Vora | Xuan Gong |
+| [YARN-4092](https://issues.apache.org/jira/browse/YARN-4092) | RM HA UI redirection needs to be fixed when both RMs are in standby mode |  Major | resourcemanager | Xuan Gong | Xuan Gong |
+| [YARN-3969](https://issues.apache.org/jira/browse/YARN-3969) | Allow jobs to be submitted to reservation that is active but does not have any allocations |  Major | capacityscheduler, fairscheduler, resourcemanager | Subru Krishnan | Subru Krishnan |
+| [YARN-3893](https://issues.apache.org/jira/browse/YARN-3893) | Both RM in active state when Admin#transitionToActive failure from refeshAll() |  Critical | resourcemanager | Bibin A Chundatt | Bibin A Chundatt |
+| [YARN-3740](https://issues.apache.org/jira/browse/YARN-3740) | Fixed the typo with the configuration name: APPLICATION\_HISTORY\_PREFIX\_MAX\_APPS |  Major | resourcemanager, webapp, yarn | Xuan Gong | Xuan Gong |
+| [YARN-3700](https://issues.apache.org/jira/browse/YARN-3700) | ATS Web Performance issue at load time when large number of jobs |  Major | resourcemanager, webapp, yarn | Xuan Gong | Xuan Gong |
+| [YARN-3136](https://issues.apache.org/jira/browse/YARN-3136) | getTransferredContainers can be a bottleneck during AM registration |  Major | scheduler | Jason Lowe | Sunil G |
+| [YARN-2902](https://issues.apache.org/jira/browse/YARN-2902) | Killing a container that is localizing can orphan resources in the DOWNLOADING state |  Major | nodemanager | Jason Lowe | Varun Saxena |
+| [YARN-2801](https://issues.apache.org/jira/browse/YARN-2801) | Add documentation for node labels feature |  Major | documentation | Gururaj Shetty | Wangda Tan |
+| [YARN-2513](https://issues.apache.org/jira/browse/YARN-2513) | Host framework UIs in YARN for use with the ATS |  Major | timelineserver | Jonathan Eagles | Jonathan Eagles |
+
+
+### OTHER:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/2.7.2/RELEASENOTES.2.7.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.7.2/RELEASENOTES.2.7.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.7.2/RELEASENOTES.2.7.2.md
new file mode 100644
index 0000000..57cf8e7
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.7.2/RELEASENOTES.2.7.2.md
@@ -0,0 +1,35 @@
+
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+# Apache Hadoop  2.7.2 Release Notes
+
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
+
+
+---
+
+* [HADOOP-7139](https://issues.apache.org/jira/browse/HADOOP-7139) | *Major* | **Allow appending to existing SequenceFiles**
+
+Existing sequence files can be appended.
+
+
+---
+
+* [HDFS-7645](https://issues.apache.org/jira/browse/HDFS-7645) | *Major* | **Rolling upgrade is restoring blocks from trash multiple times**
+
+**WARNING: No release note provided for this incompatible change.**


[23/34] hadoop git commit: HADOOP-12904. Update Yetus to 0.2.0 (aw)

Posted by ar...@apache.org.
HADOOP-12904. Update Yetus to 0.2.0 (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f9d2f66
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f9d2f66
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f9d2f66

Branch: refs/heads/HDFS-1312
Commit: 6f9d2f669fe21755da3235f6c679280982f4242c
Parents: 0233d4e
Author: Allen Wittenauer <aw...@apache.org>
Authored: Tue Mar 8 16:45:09 2016 -0800
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Tue Mar 8 16:45:25 2016 -0800

----------------------------------------------------------------------
 dev-support/bin/yetus-wrapper | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f9d2f66/dev-support/bin/yetus-wrapper
----------------------------------------------------------------------
diff --git a/dev-support/bin/yetus-wrapper b/dev-support/bin/yetus-wrapper
index ac3e121..b064925 100755
--- a/dev-support/bin/yetus-wrapper
+++ b/dev-support/bin/yetus-wrapper
@@ -63,7 +63,7 @@ WANTED="$1"
 shift
 ARGV=("$@")
 
-HADOOP_YETUS_VERSION=${HADOOP_YETUS_VERSION:-0.1.0}
+HADOOP_YETUS_VERSION=${HADOOP_YETUS_VERSION:-0.2.0}
 BIN=$(yetus_abs "${BASH_SOURCE-$0}")
 BINDIR=$(dirname "${BIN}")
 
@@ -102,7 +102,7 @@ fi
 ## need to DL, etc
 ##
 
-BASEURL="https://dist.apache.org/repos/dist/release/yetus/${HADOOP_YETUS_VERSION}/"
+BASEURL="https://archive.apache.org/dist/yetus/${HADOOP_YETUS_VERSION}/"
 TARBALL="yetus-${HADOOP_YETUS_VERSION}-bin.tar"
 
 GPGBIN=$(command -v gpg)


[34/34] hadoop git commit: Merge branch 'trunk' into HDFS-1312

Posted by ar...@apache.org.
Merge branch 'trunk' into HDFS-1312


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e7fd4de8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e7fd4de8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e7fd4de8

Branch: refs/heads/HDFS-1312
Commit: e7fd4de8b6cc8bb0ec397f9d2ad260124c3d49e6
Parents: 6fc218b 2e040d3
Author: Arpit Agarwal <ar...@apache.org>
Authored: Wed Mar 9 09:44:34 2016 -0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Wed Mar 9 09:44:34 2016 -0800

----------------------------------------------------------------------
 dev-support/bin/yetus-wrapper                   |   4 +-
 hadoop-common-project/hadoop-common/pom.xml     |   1 -
 .../crypto/key/kms/KMSClientProvider.java       |   8 +
 .../main/java/org/apache/hadoop/ipc/Server.java | 134 ++++--
 .../apache/hadoop/security/ssl/SSLFactory.java  |   9 +-
 .../hadoop/util/ApplicationClassLoader.java     |   5 +-
 .../src/site/markdown/Compatibility.md          |   4 +-
 .../hadoop-common/src/site/markdown/Metrics.md  |   2 +
 .../src/site/markdown/SecureMode.md             |  27 +-
 .../markdown/release/0.1.0/CHANGES.0.1.0.md     |   6 +
 .../release/0.1.0/RELEASENOTES.0.1.0.md         |   2 +-
 .../markdown/release/0.1.1/CHANGES.0.1.1.md     |   6 +
 .../release/0.1.1/RELEASENOTES.0.1.1.md         |   2 +-
 .../markdown/release/0.10.0/CHANGES.0.10.0.md   |   6 +
 .../release/0.10.0/RELEASENOTES.0.10.0.md       |   2 +-
 .../markdown/release/0.10.1/CHANGES.0.10.1.md   |   6 +
 .../release/0.10.1/RELEASENOTES.0.10.1.md       |   2 +-
 .../markdown/release/0.11.0/CHANGES.0.11.0.md   |   6 +
 .../release/0.11.0/RELEASENOTES.0.11.0.md       |   2 +-
 .../markdown/release/0.11.1/CHANGES.0.11.1.md   |   6 +
 .../release/0.11.1/RELEASENOTES.0.11.1.md       |   2 +-
 .../markdown/release/0.11.2/CHANGES.0.11.2.md   |   6 +
 .../release/0.11.2/RELEASENOTES.0.11.2.md       |   2 +-
 .../markdown/release/0.12.0/CHANGES.0.12.0.md   |   6 +
 .../release/0.12.0/RELEASENOTES.0.12.0.md       |   2 +-
 .../markdown/release/0.12.1/CHANGES.0.12.1.md   |   6 +
 .../release/0.12.1/RELEASENOTES.0.12.1.md       |   2 +-
 .../markdown/release/0.12.2/CHANGES.0.12.2.md   |   6 +
 .../release/0.12.2/RELEASENOTES.0.12.2.md       |   2 +-
 .../markdown/release/0.12.3/CHANGES.0.12.3.md   |   6 +
 .../release/0.12.3/RELEASENOTES.0.12.3.md       |   2 +-
 .../markdown/release/0.13.0/CHANGES.0.13.0.md   |   6 +
 .../release/0.13.0/RELEASENOTES.0.13.0.md       |   2 +-
 .../markdown/release/0.14.0/CHANGES.0.14.0.md   |   6 +
 .../release/0.14.0/RELEASENOTES.0.14.0.md       |   2 +-
 .../markdown/release/0.14.1/CHANGES.0.14.1.md   |   6 +
 .../release/0.14.1/RELEASENOTES.0.14.1.md       |   2 +-
 .../markdown/release/0.14.2/CHANGES.0.14.2.md   |   6 +
 .../release/0.14.2/RELEASENOTES.0.14.2.md       |   2 +-
 .../markdown/release/0.14.3/CHANGES.0.14.3.md   |   6 +
 .../release/0.14.3/RELEASENOTES.0.14.3.md       |   2 +-
 .../markdown/release/0.14.4/CHANGES.0.14.4.md   |   6 +
 .../release/0.14.4/RELEASENOTES.0.14.4.md       |   2 +-
 .../markdown/release/0.15.0/CHANGES.0.15.0.md   |   6 +
 .../release/0.15.0/RELEASENOTES.0.15.0.md       |   2 +-
 .../markdown/release/0.15.1/CHANGES.0.15.1.md   |   6 +
 .../release/0.15.1/RELEASENOTES.0.15.1.md       |   2 +-
 .../markdown/release/0.15.2/CHANGES.0.15.2.md   |   6 +
 .../release/0.15.2/RELEASENOTES.0.15.2.md       |   2 +-
 .../markdown/release/0.15.3/CHANGES.0.15.3.md   |   6 +
 .../release/0.15.3/RELEASENOTES.0.15.3.md       |   2 +-
 .../markdown/release/0.15.4/CHANGES.0.15.4.md   |   8 +-
 .../release/0.15.4/RELEASENOTES.0.15.4.md       |   2 +-
 .../markdown/release/0.16.0/CHANGES.0.16.0.md   |   6 +
 .../release/0.16.0/RELEASENOTES.0.16.0.md       |   2 +-
 .../markdown/release/0.16.1/CHANGES.0.16.1.md   |   6 +
 .../release/0.16.1/RELEASENOTES.0.16.1.md       |   2 +-
 .../markdown/release/0.16.2/CHANGES.0.16.2.md   |   6 +
 .../release/0.16.2/RELEASENOTES.0.16.2.md       |   2 +-
 .../markdown/release/0.16.3/CHANGES.0.16.3.md   |   6 +
 .../release/0.16.3/RELEASENOTES.0.16.3.md       |   2 +-
 .../markdown/release/0.16.4/CHANGES.0.16.4.md   |   6 +
 .../release/0.16.4/RELEASENOTES.0.16.4.md       |   2 +-
 .../markdown/release/0.17.0/CHANGES.0.17.0.md   |   6 +
 .../release/0.17.0/RELEASENOTES.0.17.0.md       |   4 +-
 .../markdown/release/0.17.1/CHANGES.0.17.1.md   |   6 +
 .../release/0.17.1/RELEASENOTES.0.17.1.md       |   2 +-
 .../markdown/release/0.17.2/CHANGES.0.17.2.md   |   6 +
 .../release/0.17.2/RELEASENOTES.0.17.2.md       |   2 +-
 .../markdown/release/0.17.3/CHANGES.0.17.3.md   |   8 +-
 .../release/0.17.3/RELEASENOTES.0.17.3.md       |   2 +-
 .../markdown/release/0.18.0/CHANGES.0.18.0.md   |   6 +
 .../release/0.18.0/RELEASENOTES.0.18.0.md       |   2 +-
 .../markdown/release/0.18.1/CHANGES.0.18.1.md   |   6 +
 .../release/0.18.1/RELEASENOTES.0.18.1.md       |   2 +-
 .../markdown/release/0.18.2/CHANGES.0.18.2.md   |   6 +
 .../release/0.18.2/RELEASENOTES.0.18.2.md       |   2 +-
 .../markdown/release/0.18.3/CHANGES.0.18.3.md   |   6 +
 .../release/0.18.3/RELEASENOTES.0.18.3.md       |   2 +-
 .../markdown/release/0.18.4/CHANGES.0.18.4.md   |   8 +-
 .../release/0.18.4/RELEASENOTES.0.18.4.md       |   2 +-
 .../markdown/release/0.19.0/CHANGES.0.19.0.md   |   6 +
 .../release/0.19.0/RELEASENOTES.0.19.0.md       |   6 +-
 .../markdown/release/0.19.1/CHANGES.0.19.1.md   |   6 +
 .../release/0.19.1/RELEASENOTES.0.19.1.md       |   2 +-
 .../markdown/release/0.19.2/CHANGES.0.19.2.md   |   6 +
 .../release/0.19.2/RELEASENOTES.0.19.2.md       |   2 +-
 .../markdown/release/0.2.0/CHANGES.0.2.0.md     |   6 +
 .../release/0.2.0/RELEASENOTES.0.2.0.md         |   2 +-
 .../markdown/release/0.2.1/CHANGES.0.2.1.md     |   6 +
 .../release/0.2.1/RELEASENOTES.0.2.1.md         |   2 +-
 .../markdown/release/0.20.0/CHANGES.0.20.0.md   |   6 +
 .../release/0.20.0/RELEASENOTES.0.20.0.md       |   4 +-
 .../markdown/release/0.20.1/CHANGES.0.20.1.md   |   6 +
 .../release/0.20.1/RELEASENOTES.0.20.1.md       |   6 +-
 .../markdown/release/0.20.2/CHANGES.0.20.2.md   |   6 +
 .../release/0.20.2/RELEASENOTES.0.20.2.md       |   2 +-
 .../release/0.20.203.0/CHANGES.0.20.203.0.md    |   6 +
 .../0.20.203.0/RELEASENOTES.0.20.203.0.md       |  10 +-
 .../release/0.20.203.1/CHANGES.0.20.203.1.md    |   8 +-
 .../0.20.203.1/RELEASENOTES.0.20.203.1.md       |   2 +-
 .../release/0.20.204.0/CHANGES.0.20.204.0.md    |   6 +
 .../0.20.204.0/RELEASENOTES.0.20.204.0.md       |   2 +-
 .../release/0.20.205.0/CHANGES.0.20.205.0.md    |   6 +
 .../0.20.205.0/RELEASENOTES.0.20.205.0.md       |   2 +-
 .../markdown/release/0.20.3/CHANGES.0.20.3.md   |   8 +-
 .../release/0.20.3/RELEASENOTES.0.20.3.md       |   2 +-
 .../markdown/release/0.21.0/CHANGES.0.21.0.md   |   6 +
 .../release/0.21.0/RELEASENOTES.0.21.0.md       |  10 +-
 .../markdown/release/0.21.1/CHANGES.0.21.1.md   |   8 +-
 .../release/0.21.1/RELEASENOTES.0.21.1.md       |   6 +-
 .../markdown/release/0.22.0/CHANGES.0.22.0.md   |   6 +
 .../release/0.22.0/RELEASENOTES.0.22.0.md       |   7 +-
 .../markdown/release/0.22.1/CHANGES.0.22.1.md   |   8 +-
 .../release/0.22.1/RELEASENOTES.0.22.1.md       |   4 +-
 .../markdown/release/0.23.0/CHANGES.0.23.0.md   |   6 +
 .../release/0.23.0/RELEASENOTES.0.23.0.md       |  18 +-
 .../markdown/release/0.23.1/CHANGES.0.23.1.md   |   6 +
 .../release/0.23.1/RELEASENOTES.0.23.1.md       |   6 +-
 .../markdown/release/0.23.10/CHANGES.0.23.10.md |   6 +
 .../release/0.23.10/RELEASENOTES.0.23.10.md     |   2 +-
 .../markdown/release/0.23.11/CHANGES.0.23.11.md |   6 +
 .../release/0.23.11/RELEASENOTES.0.23.11.md     |   2 +-
 .../markdown/release/0.23.2/CHANGES.0.23.2.md   |   8 +-
 .../release/0.23.2/RELEASENOTES.0.23.2.md       |   4 +-
 .../markdown/release/0.23.3/CHANGES.0.23.3.md   |   6 +
 .../release/0.23.3/RELEASENOTES.0.23.3.md       |   2 +-
 .../markdown/release/0.23.4/CHANGES.0.23.4.md   |   6 +
 .../release/0.23.4/RELEASENOTES.0.23.4.md       |   2 +-
 .../markdown/release/0.23.5/CHANGES.0.23.5.md   |   6 +
 .../release/0.23.5/RELEASENOTES.0.23.5.md       |   2 +-
 .../markdown/release/0.23.6/CHANGES.0.23.6.md   |   6 +
 .../release/0.23.6/RELEASENOTES.0.23.6.md       |   2 +-
 .../markdown/release/0.23.7/CHANGES.0.23.7.md   |   6 +
 .../release/0.23.7/RELEASENOTES.0.23.7.md       |   2 +-
 .../markdown/release/0.23.8/CHANGES.0.23.8.md   |   6 +
 .../release/0.23.8/RELEASENOTES.0.23.8.md       |   2 +-
 .../markdown/release/0.23.9/CHANGES.0.23.9.md   |   6 +
 .../release/0.23.9/RELEASENOTES.0.23.9.md       |   2 +-
 .../markdown/release/0.24.0/CHANGES.0.24.0.md   |   8 +-
 .../release/0.24.0/RELEASENOTES.0.24.0.md       |   2 +-
 .../markdown/release/0.3.0/CHANGES.0.3.0.md     |   6 +
 .../release/0.3.0/RELEASENOTES.0.3.0.md         |   2 +-
 .../markdown/release/0.3.1/CHANGES.0.3.1.md     |   6 +
 .../release/0.3.1/RELEASENOTES.0.3.1.md         |   2 +-
 .../markdown/release/0.3.2/CHANGES.0.3.2.md     |   6 +
 .../release/0.3.2/RELEASENOTES.0.3.2.md         |   2 +-
 .../markdown/release/0.4.0/CHANGES.0.4.0.md     |   6 +
 .../release/0.4.0/RELEASENOTES.0.4.0.md         |   2 +-
 .../markdown/release/0.5.0/CHANGES.0.5.0.md     |   6 +
 .../release/0.5.0/RELEASENOTES.0.5.0.md         |   2 +-
 .../markdown/release/0.6.0/CHANGES.0.6.0.md     |   6 +
 .../release/0.6.0/RELEASENOTES.0.6.0.md         |   2 +-
 .../markdown/release/0.6.1/CHANGES.0.6.1.md     |   6 +
 .../release/0.6.1/RELEASENOTES.0.6.1.md         |   2 +-
 .../markdown/release/0.6.2/CHANGES.0.6.2.md     |   6 +
 .../release/0.6.2/RELEASENOTES.0.6.2.md         |   2 +-
 .../markdown/release/0.7.0/CHANGES.0.7.0.md     |   6 +
 .../release/0.7.0/RELEASENOTES.0.7.0.md         |   2 +-
 .../markdown/release/0.7.1/CHANGES.0.7.1.md     |   6 +
 .../release/0.7.1/RELEASENOTES.0.7.1.md         |   2 +-
 .../markdown/release/0.7.2/CHANGES.0.7.2.md     |   6 +
 .../release/0.7.2/RELEASENOTES.0.7.2.md         |   2 +-
 .../markdown/release/0.8.0/CHANGES.0.8.0.md     |   6 +
 .../release/0.8.0/RELEASENOTES.0.8.0.md         |   2 +-
 .../markdown/release/0.9.0/CHANGES.0.9.0.md     |   6 +
 .../release/0.9.0/RELEASENOTES.0.9.0.md         |   2 +-
 .../markdown/release/0.9.1/CHANGES.0.9.1.md     |   6 +
 .../release/0.9.1/RELEASENOTES.0.9.1.md         |   2 +-
 .../markdown/release/0.9.2/CHANGES.0.9.2.md     |   6 +
 .../release/0.9.2/RELEASENOTES.0.9.2.md         |   2 +-
 .../markdown/release/1.0.0/CHANGES.1.0.0.md     |   6 +
 .../release/1.0.0/RELEASENOTES.1.0.0.md         |   2 +-
 .../markdown/release/1.0.1/CHANGES.1.0.1.md     |   6 +
 .../release/1.0.1/RELEASENOTES.1.0.1.md         |   2 +-
 .../markdown/release/1.0.2/CHANGES.1.0.2.md     |   6 +
 .../release/1.0.2/RELEASENOTES.1.0.2.md         |   2 +-
 .../markdown/release/1.0.3/CHANGES.1.0.3.md     |   6 +
 .../release/1.0.3/RELEASENOTES.1.0.3.md         |   2 +-
 .../markdown/release/1.0.4/CHANGES.1.0.4.md     |   6 +
 .../release/1.0.4/RELEASENOTES.1.0.4.md         |   2 +-
 .../markdown/release/1.1.0/CHANGES.1.1.0.md     |   6 +
 .../release/1.1.0/RELEASENOTES.1.1.0.md         |   8 +-
 .../markdown/release/1.1.1/CHANGES.1.1.1.md     |   6 +
 .../release/1.1.1/RELEASENOTES.1.1.1.md         |   2 +-
 .../markdown/release/1.1.2/CHANGES.1.1.2.md     |   6 +
 .../release/1.1.2/RELEASENOTES.1.1.2.md         |   2 +-
 .../markdown/release/1.1.3/CHANGES.1.1.3.md     |   8 +-
 .../release/1.1.3/RELEASENOTES.1.1.3.md         |   2 +-
 .../markdown/release/1.2.0/CHANGES.1.2.0.md     |   6 +
 .../release/1.2.0/RELEASENOTES.1.2.0.md         |   4 +-
 .../markdown/release/1.2.1/CHANGES.1.2.1.md     |   6 +
 .../release/1.2.1/RELEASENOTES.1.2.1.md         |   2 +-
 .../markdown/release/1.2.2/CHANGES.1.2.2.md     |   8 +-
 .../release/1.2.2/RELEASENOTES.1.2.2.md         |   2 +-
 .../markdown/release/1.3.0/CHANGES.1.3.0.md     |   8 +-
 .../release/1.3.0/RELEASENOTES.1.3.0.md         |   2 +-
 .../release/2.0.0-alpha/CHANGES.2.0.0-alpha.md  |   6 +
 .../2.0.0-alpha/RELEASENOTES.2.0.0-alpha.md     |   2 +-
 .../release/2.0.1-alpha/CHANGES.2.0.1-alpha.md  |   6 +
 .../2.0.1-alpha/RELEASENOTES.2.0.1-alpha.md     |   2 +-
 .../release/2.0.2-alpha/CHANGES.2.0.2-alpha.md  |   6 +
 .../2.0.2-alpha/RELEASENOTES.2.0.2-alpha.md     |   2 +-
 .../release/2.0.3-alpha/CHANGES.2.0.3-alpha.md  |   6 +
 .../2.0.3-alpha/RELEASENOTES.2.0.3-alpha.md     |   4 +-
 .../release/2.0.4-alpha/CHANGES.2.0.4-alpha.md  |   6 +
 .../2.0.4-alpha/RELEASENOTES.2.0.4-alpha.md     |   2 +-
 .../release/2.0.5-alpha/CHANGES.2.0.5-alpha.md  |   6 +
 .../2.0.5-alpha/RELEASENOTES.2.0.5-alpha.md     |   2 +-
 .../release/2.0.6-alpha/CHANGES.2.0.6-alpha.md  |   6 +
 .../2.0.6-alpha/RELEASENOTES.2.0.6-alpha.md     |   2 +-
 .../release/2.1.0-beta/CHANGES.2.1.0-beta.md    |   6 +
 .../2.1.0-beta/RELEASENOTES.2.1.0-beta.md       |   2 +-
 .../release/2.1.1-beta/CHANGES.2.1.1-beta.md    |   6 +
 .../2.1.1-beta/RELEASENOTES.2.1.1-beta.md       |   2 +-
 .../markdown/release/2.2.0/CHANGES.2.2.0.md     |   6 +
 .../release/2.2.0/RELEASENOTES.2.2.0.md         |   2 +-
 .../markdown/release/2.2.1/CHANGES.2.2.1.md     |   8 +-
 .../release/2.2.1/RELEASENOTES.2.2.1.md         |   2 +-
 .../markdown/release/2.3.0/CHANGES.2.3.0.md     |  13 +-
 .../release/2.3.0/RELEASENOTES.2.3.0.md         |   3 +-
 .../markdown/release/2.4.0/CHANGES.2.4.0.md     |   7 +
 .../release/2.4.0/RELEASENOTES.2.4.0.md         |   2 +-
 .../markdown/release/2.4.1/CHANGES.2.4.1.md     |   6 +
 .../release/2.4.1/RELEASENOTES.2.4.1.md         |   2 +-
 .../markdown/release/2.5.0/CHANGES.2.5.0.md     |   8 +-
 .../release/2.5.0/RELEASENOTES.2.5.0.md         |  16 +-
 .../markdown/release/2.5.1/CHANGES.2.5.1.md     |   6 +
 .../release/2.5.1/RELEASENOTES.2.5.1.md         |   2 +-
 .../markdown/release/2.5.2/CHANGES.2.5.2.md     |   6 +
 .../release/2.5.2/RELEASENOTES.2.5.2.md         |   2 +-
 .../markdown/release/2.6.0/CHANGES.2.6.0.md     |  11 +-
 .../release/2.6.0/RELEASENOTES.2.6.0.md         |  37 +-
 .../markdown/release/2.6.1/CHANGES.2.6.1.md     | 228 +++++++++
 .../release/2.6.1/RELEASENOTES.2.6.1.md         |  45 ++
 .../markdown/release/2.6.2/CHANGES.2.6.2.md     |  83 ++++
 .../release/2.6.2/RELEASENOTES.2.6.2.md         |  21 +
 .../markdown/release/2.6.3/CHANGES.2.6.3.md     | 103 ++++
 .../release/2.6.3/RELEASENOTES.2.6.3.md         |  21 +
 .../markdown/release/2.6.4/CHANGES.2.6.4.md     | 114 +++++
 .../release/2.6.4/RELEASENOTES.2.6.4.md         |  28 ++
 .../markdown/release/2.7.0/CHANGES.2.7.0.md     |  16 +-
 .../release/2.7.0/RELEASENOTES.2.7.0.md         |  83 +---
 .../markdown/release/2.7.1/CHANGES.2.7.1.md     |  17 +-
 .../release/2.7.1/RELEASENOTES.2.7.1.md         |   7 +-
 .../markdown/release/2.7.2/CHANGES.2.7.2.md     | 224 +++++++++
 .../release/2.7.2/RELEASENOTES.2.7.2.md         |  35 ++
 .../java/org/apache/hadoop/ipc/TestServer.java  |  71 ++-
 .../org/apache/hadoop/hdfs/DFSOutputStream.java |   9 +-
 .../org/apache/hadoop/hdfs/DataStreamer.java    |   8 +-
 .../hdfs/protocol/ErasureCodingPolicy.java      |  20 +-
 .../hadoop/hdfs/protocol/HdfsConstants.java     |   7 +
 .../hadoop/hdfs/protocol/HdfsFileStatus.java    |   2 +-
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |   5 +-
 .../src/main/proto/hdfs.proto                   |   1 +
 hadoop-hdfs-project/hadoop-hdfs/pom.xml         |   3 -
 .../server/blockmanagement/BlockManager.java    |  16 +-
 .../BlockReconstructionWork.java                |  19 +-
 .../blockmanagement/ErasureCodingWork.java      |  67 ++-
 .../server/blockmanagement/ReplicationWork.java |   2 +-
 .../blockmanagement/UnderReplicatedBlocks.java  |   2 +-
 .../hdfs/server/datanode/BPServiceActor.java    |   5 +
 .../datanode/metrics/DataNodeMetrics.java       |   5 +
 .../namenode/ErasureCodingPolicyManager.java    |  60 ++-
 .../server/namenode/FSDirErasureCodingOp.java   |  10 +-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  20 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  91 +++-
 .../server/namenode/INodeFileAttributes.java    |  23 +-
 .../hdfs/server/namenode/TransferFsImage.java   |  33 +-
 .../hdfs/TestDecommissionWithStriped.java       | 473 +++++++++++++++++++
 .../hadoop/hdfs/TestErasureCodingPolicies.java  |  43 +-
 ...constructStripedBlocksWithRackAwareness.java |  49 +-
 .../hdfs/server/datanode/TestBlockRecovery.java |   3 +-
 .../server/datanode/TestBlockReplacement.java   |  18 +-
 .../hdfs/server/namenode/TestFSImage.java       |   2 +-
 .../server/namenode/TestStripedINodeFile.java   |   8 +-
 .../namenode/ha/TestStandbyCheckpoints.java     |   2 +-
 .../hadoop/hdfs/util/TestStripedBlockUtil.java  |  13 +-
 .../mapreduce/v2/jobhistory/JHAdminConfig.java  |  13 +
 .../src/main/resources/mapred-default.xml       |  36 ++
 .../mapreduce/v2/hs/HistoryClientService.java   |   1 +
 hadoop-mapreduce-project/pom.xml                |   1 -
 .../fs/azure/AzureNativeFileSystemStore.java    |   4 +-
 ...ativeAzureFileSystemAtomicRenameDirList.java |  49 ++
 .../hadoop/yarn/conf/YarnConfiguration.java     |  24 +
 .../org/apache/hadoop/yarn/webapp/WebApps.java  |  42 ++
 .../src/main/resources/yarn-default.xml         |  78 +++
 .../ApplicationHistoryServer.java               |  19 +-
 .../nodemanager/LinuxContainerExecutor.java     |   4 +-
 .../linux/privileged/PrivilegedOperation.java   |  23 +-
 .../privileged/PrivilegedOperationExecutor.java |  21 +-
 .../linux/resources/CGroupsHandlerImpl.java     |   2 +-
 .../linux/resources/ResourceHandlerModule.java  |  27 +-
 .../linux/resources/TrafficController.java      |   2 +-
 .../runtime/DefaultLinuxContainerRuntime.java   |  11 +-
 .../DelegatingLinuxContainerRuntime.java        |  13 +-
 .../runtime/DockerLinuxContainerRuntime.java    |  32 +-
 .../server/nodemanager/webapp/WebServer.java    |   1 +
 .../container-executor/impl/configuration.c     |  98 ++--
 .../container-executor/impl/configuration.h     |  28 +-
 .../impl/container-executor.c                   |  27 +-
 .../impl/container-executor.h                   |  13 +-
 .../main/native/container-executor/impl/main.c  |   4 +-
 .../test/test-container-executor.c              |   8 +-
 .../TestPrivilegedOperationExecutor.java        |   4 +-
 .../linux/resources/TestCGroupsHandlerImpl.java |   2 +-
 .../runtime/TestDockerContainerRuntime.java     |  15 +
 .../server/resourcemanager/RMAppManager.java    |  45 +-
 .../server/resourcemanager/ResourceManager.java |   1 +
 .../scheduler/SchedulerUtils.java               |  56 ++-
 .../scheduler/fair/FairScheduler.java           |   4 +-
 .../resourcemanager/webapp/RMAppsBlock.java     |   9 +-
 .../resourcemanager/TestApplicationACLs.java    |  36 ++
 .../scheduler/TestAbstractYarnScheduler.java    | 132 ++++++
 .../scheduler/TestSchedulerUtils.java           |  34 +-
 .../capacity/TestCapacityScheduler.java         | 110 -----
 .../yarn/webapp/TestRMWithCSRFFilter.java       | 231 +++++++++
 .../server/webproxy/WebAppProxyServlet.java     |   2 +-
 .../server/webproxy/TestWebAppProxyServlet.java |   2 +-
 .../src/site/markdown/TimelineServer.md         |   2 +-
 hadoop-yarn-project/pom.xml                     |   9 -
 321 files changed, 3831 insertions(+), 811 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7fd4de8/hadoop-hdfs-project/hadoop-hdfs/pom.xml
----------------------------------------------------------------------


[30/34] hadoop git commit: HDFS-7866. Erasure coding: NameNode manages multiple erasure coding policies. Contributed by Rui Li.

Posted by ar...@apache.org.
HDFS-7866. Erasure coding: NameNode manages multiple erasure coding policies. Contributed by Rui Li.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7600e3c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7600e3c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7600e3c4

Branch: refs/heads/HDFS-1312
Commit: 7600e3c48ff2043654dbe9f415a186a336b5ea6c
Parents: 89b16d2
Author: Zhe Zhang <zh...@apache.org>
Authored: Tue Mar 8 22:30:30 2016 -0800
Committer: Zhe Zhang <zh...@apache.org>
Committed: Tue Mar 8 22:30:30 2016 -0800

----------------------------------------------------------------------
 .../hdfs/protocol/ErasureCodingPolicy.java      | 20 ++++-
 .../hadoop/hdfs/protocol/HdfsConstants.java     |  7 ++
 .../hadoop/hdfs/protocol/HdfsFileStatus.java    |  2 +-
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  5 +-
 .../src/main/proto/hdfs.proto                   |  1 +
 .../blockmanagement/UnderReplicatedBlocks.java  |  2 +-
 .../namenode/ErasureCodingPolicyManager.java    | 60 +++++++------
 .../server/namenode/FSDirErasureCodingOp.java   | 10 +--
 .../hdfs/server/namenode/FSDirWriteFileOp.java  | 20 +++--
 .../hadoop/hdfs/server/namenode/INodeFile.java  | 91 ++++++++++++++------
 .../server/namenode/INodeFileAttributes.java    | 23 ++---
 .../hadoop/hdfs/TestErasureCodingPolicies.java  | 43 ++++++---
 ...constructStripedBlocksWithRackAwareness.java | 49 ++++++++---
 .../hdfs/server/datanode/TestBlockRecovery.java |  3 +-
 .../hdfs/server/namenode/TestFSImage.java       |  2 +-
 .../server/namenode/TestStripedINodeFile.java   |  8 +-
 .../hadoop/hdfs/util/TestStripedBlockUtil.java  | 13 +--
 17 files changed, 246 insertions(+), 113 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7600e3c4/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
index 068156d..2c748d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
@@ -31,11 +31,25 @@ public final class ErasureCodingPolicy {
   private final String name;
   private final ECSchema schema;
   private final int cellSize;
+  private final byte id;
 
-  public ErasureCodingPolicy(String name, ECSchema schema, int cellSize){
+  public ErasureCodingPolicy(String name, ECSchema schema,
+      int cellSize, byte id) {
     this.name = name;
     this.schema = schema;
     this.cellSize = cellSize;
+    this.id = id;
+  }
+
+  public ErasureCodingPolicy(ECSchema schema, int cellSize, byte id) {
+    this(composePolicyName(schema, cellSize), schema, cellSize, id);
+  }
+
+  private static String composePolicyName(ECSchema schema, int cellSize) {
+    assert cellSize % 1024 == 0;
+    return schema.getCodecName().toUpperCase() + "-" +
+        schema.getNumDataUnits() + "-" + schema.getNumParityUnits() +
+        "-" + cellSize / 1024 + "k";
   }
 
   public String getName() {
@@ -58,6 +72,10 @@ public final class ErasureCodingPolicy {
     return schema.getNumParityUnits();
   }
 
+  public byte getId() {
+    return id;
+  }
+
   @Override
   public boolean equals(Object o) {
     if (this == o) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7600e3c4/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index ac28780..3c5c441 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -22,6 +22,7 @@ import java.util.Map;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.util.StringUtils;
 
 @InterfaceAudience.Private
@@ -144,6 +145,12 @@ public final class HdfsConstants {
     ALL, LIVE, DEAD, DECOMMISSIONING
   }
 
+  public static final ECSchema RS_6_3_SCHEMA = new ECSchema("rs", 6, 3);
+  public static final byte RS_6_3_POLICY_ID = 0;
+
+  public static final ECSchema RS_3_2_SCHEMA = new ECSchema("rs", 3, 2);
+  public static final byte RS_3_2_POLICY_ID = 1;
+
   /* Hidden constructor */
   protected HdfsConstants() {
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7600e3c4/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
index 0b5b836..2a04800 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
@@ -79,7 +79,7 @@ public class HdfsFileStatus {
       byte storagePolicy, ErasureCodingPolicy ecPolicy) {
     this.length = length;
     this.isdir = isdir;
-    this.block_replication = (short)block_replication;
+    this.block_replication = ecPolicy == null ? (short) block_replication : 0;
     this.blocksize = blocksize;
     this.modification_time = modification_time;
     this.access_time = access_time;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7600e3c4/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index 6989cb9..38e875c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -2487,7 +2487,7 @@ public class PBHelperClient {
       ErasureCodingPolicyProto policy) {
     return new ErasureCodingPolicy(policy.getName(),
         convertECSchema(policy.getSchema()),
-        policy.getCellSize());
+        policy.getCellSize(), (byte) policy.getId());
   }
 
   public static ErasureCodingPolicyProto convertErasureCodingPolicy(
@@ -2496,7 +2496,8 @@ public class PBHelperClient {
         .newBuilder()
         .setName(policy.getName())
         .setSchema(convertECSchema(policy.getSchema()))
-        .setCellSize(policy.getCellSize());
+        .setCellSize(policy.getCellSize())
+        .setId(policy.getId());
     return builder.build();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7600e3c4/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
index b6f91e7..0db8a3f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
@@ -337,6 +337,7 @@ message ErasureCodingPolicyProto {
   required string name = 1;
   required ECSchemaProto schema = 2;
   required uint32 cellSize = 3;
+  required uint32 id = 4; // Actually a byte - only 8 bits used
 }
 
 /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7600e3c4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java
index d4938c5..5e8f7ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java
@@ -208,7 +208,7 @@ class UnderReplicatedBlocks implements Iterable<BlockInfo> {
       // highest risk of loss, highest priority
       return QUEUE_HIGHEST_PRIORITY;
     } else if ((curReplicas - dataBlkNum) * 3 < parityBlkNum + 1) {
-      // can only afford one replica loss
+      // there is less than a third as many blocks as requested;
       // this is considered very under-replicated
       return QUEUE_VERY_UNDER_REPLICATED;
     } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7600e3c4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index b77279b..eee80d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 
 import java.util.Map;
 import java.util.TreeMap;
@@ -37,33 +37,29 @@ public final class ErasureCodingPolicyManager {
   /**
    * TODO: HDFS-8095
    */
-  private static final int DEFAULT_DATA_BLOCKS = 6;
-  private static final int DEFAULT_PARITY_BLOCKS = 3;
   private static final int DEFAULT_CELLSIZE = 64 * 1024;
-  private static final String DEFAULT_CODEC_NAME = "rs";
-  private static final String DEFAULT_POLICY_NAME = "RS-6-3-64k";
-  private static final ECSchema SYS_DEFAULT_SCHEMA = new ECSchema(
-      DEFAULT_CODEC_NAME, DEFAULT_DATA_BLOCKS, DEFAULT_PARITY_BLOCKS);
-  private static final ErasureCodingPolicy SYS_DEFAULT_POLICY =
-      new ErasureCodingPolicy(DEFAULT_POLICY_NAME, SYS_DEFAULT_SCHEMA,
-      DEFAULT_CELLSIZE);
+  private static final ErasureCodingPolicy SYS_POLICY1 =
+      new ErasureCodingPolicy(HdfsConstants.RS_6_3_SCHEMA, DEFAULT_CELLSIZE,
+          HdfsConstants.RS_6_3_POLICY_ID);
+  private static final ErasureCodingPolicy SYS_POLICY2 =
+      new ErasureCodingPolicy(HdfsConstants.RS_3_2_SCHEMA, DEFAULT_CELLSIZE,
+          HdfsConstants.RS_3_2_POLICY_ID);
 
   //We may add more later.
-  private static ErasureCodingPolicy[] SYS_POLICY = new ErasureCodingPolicy[] {
-      SYS_DEFAULT_POLICY
-  };
+  private static final ErasureCodingPolicy[] SYS_POLICIES =
+      new ErasureCodingPolicy[]{SYS_POLICY1, SYS_POLICY2};
 
   /**
    * All active policies maintained in NN memory for fast querying,
    * identified and sorted by its name.
    */
-  private final Map<String, ErasureCodingPolicy> activePolicies;
+  private final Map<String, ErasureCodingPolicy> activePoliciesByName;
 
   ErasureCodingPolicyManager() {
 
-    this.activePolicies = new TreeMap<>();
-    for (ErasureCodingPolicy policy : SYS_POLICY) {
-      activePolicies.put(policy.getName(), policy);
+    this.activePoliciesByName = new TreeMap<>();
+    for (ErasureCodingPolicy policy : SYS_POLICIES) {
+      activePoliciesByName.put(policy.getName(), policy);
     }
 
     /**
@@ -77,8 +73,8 @@ public final class ErasureCodingPolicyManager {
    * Get system defined policies.
    * @return system policies
    */
-  public static ErasureCodingPolicy[] getSystemPolices() {
-    return SYS_POLICY;
+  public static ErasureCodingPolicy[] getSystemPolicies() {
+    return SYS_POLICIES;
   }
 
   /**
@@ -87,7 +83,8 @@ public final class ErasureCodingPolicyManager {
    * @return ecPolicy
    */
   public static ErasureCodingPolicy getSystemDefaultPolicy() {
-    return SYS_DEFAULT_POLICY;
+    // make this configurable?
+    return SYS_POLICY1;
   }
 
   /**
@@ -95,21 +92,34 @@ public final class ErasureCodingPolicyManager {
    * @return all policies
    */
   public ErasureCodingPolicy[] getPolicies() {
-    ErasureCodingPolicy[] results = new ErasureCodingPolicy[activePolicies.size()];
-    return activePolicies.values().toArray(results);
+    ErasureCodingPolicy[] results =
+        new ErasureCodingPolicy[activePoliciesByName.size()];
+    return activePoliciesByName.values().toArray(results);
   }
 
   /**
    * Get the policy specified by the policy name.
    */
-  public ErasureCodingPolicy getPolicy(String name) {
-    return activePolicies.get(name);
+  public ErasureCodingPolicy getPolicyByName(String name) {
+    return activePoliciesByName.get(name);
+  }
+
+  /**
+   * Get the policy specified by the policy ID.
+   */
+  public ErasureCodingPolicy getPolicyByID(byte id) {
+    for (ErasureCodingPolicy policy : activePoliciesByName.values()) {
+      if (policy.getId() == id) {
+        return policy;
+      }
+    }
+    return null;
   }
 
   /**
    * Clear and clean up
    */
   public void clear() {
-    activePolicies.clear();
+    activePoliciesByName.clear();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7600e3c4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
index 0051c5f..7405e1f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
@@ -248,12 +248,10 @@ final class FSDirErasureCodingOp {
         if (inode == null) {
           continue;
         }
-        /**
-         * TODO: lookup {@link ErasureCodingPolicyManager#getSystemPolices()}
-         */
         if (inode.isFile()) {
-          return inode.asFile().getErasureCodingPolicyID() == 0 ?
-              null : ErasureCodingPolicyManager.getSystemDefaultPolicy();
+          byte id = inode.asFile().getErasureCodingPolicyID();
+          return id < 0 ? null : fsd.getFSNamesystem().
+              getErasureCodingPolicyManager().getPolicyByID(id);
         }
         // We don't allow setting EC policies on paths with a symlink. Thus
         // if a symlink is encountered, the dir shouldn't have EC policy.
@@ -269,7 +267,7 @@ final class FSDirErasureCodingOp {
             DataInputStream dIn = new DataInputStream(bIn);
             String ecPolicyName = WritableUtils.readString(dIn);
             return fsd.getFSNamesystem().getErasureCodingPolicyManager().
-                getPolicy(ecPolicyName);
+                getPolicyByName(ecPolicyName);
           }
         }
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7600e3c4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index 6ba8e1c..cc08528 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -497,16 +497,19 @@ class FSDirWriteFileOp {
     assert fsd.hasWriteLock();
     try {
       // check if the file has an EC policy
-      final boolean isStriped = FSDirErasureCodingOp.hasErasureCodingPolicy(
-          fsd.getFSNamesystem(), existing);
+      ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.
+          getErasureCodingPolicy(fsd.getFSNamesystem(), existing);
+      if (ecPolicy != null) {
+        replication = ecPolicy.getId();
+      }
       if (underConstruction) {
         newNode = newINodeFile(id, permissions, modificationTime,
             modificationTime, replication, preferredBlockSize, storagePolicyId,
-            isStriped);
+            ecPolicy != null);
         newNode.toUnderConstruction(clientName, clientMachine);
       } else {
         newNode = newINodeFile(id, permissions, modificationTime, atime,
-            replication, preferredBlockSize, storagePolicyId, isStriped);
+            replication, preferredBlockSize, storagePolicyId, ecPolicy != null);
       }
       newNode.setLocalName(localName);
       INodesInPath iip = fsd.addINode(existing, newNode);
@@ -595,10 +598,13 @@ class FSDirWriteFileOp {
     INodesInPath newiip;
     fsd.writeLock();
     try {
-      final boolean isStriped = FSDirErasureCodingOp.hasErasureCodingPolicy(
-          fsd.getFSNamesystem(), existing);
+      ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.
+          getErasureCodingPolicy(fsd.getFSNamesystem(), existing);
+      if (ecPolicy != null) {
+        replication = ecPolicy.getId();
+      }
       INodeFile newNode = newINodeFile(fsd.allocateNewInodeId(), permissions,
-          modTime, modTime, replication, preferredBlockSize, isStriped);
+          modTime, modTime, replication, preferredBlockSize, ecPolicy != null);
       newNode.setLocalName(localName.getBytes(Charsets.UTF_8));
       newNode.toUnderConstruction(clientName, clientMachine);
       newiip = fsd.addINode(existing, newNode);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7600e3c4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 5368475..5c10c86 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -82,24 +82,57 @@ public class INodeFile extends INodeWithAdditionalFields
 
   /** 
    * Bit format:
-   * [4-bit storagePolicyID][1-bit isStriped]
-   * [11-bit replication][48-bit preferredBlockSize]
+   * [4-bit storagePolicyID][12-bit BLOCK_LAYOUT_AND_REDUNDANCY]
+   * [48-bit preferredBlockSize]
+   *
+   * BLOCK_LAYOUT_AND_REDUNDANCY contains 12 bits and describes the layout and
+   * redundancy of a block. We use the highest 1 bit to determine whether the
+   * block is replica or erasure coded. For replica blocks, the tail 11 bits
+   * stores the replication factor. For erasure coded blocks, the tail 11 bits
+   * stores the EC policy ID, and in the future, we may further divide these
+   * 11 bits to store both the EC policy ID and replication factor for erasure
+   * coded blocks. The layout of this section is demonstrated as below.
+   * +---------------+-------------------------------+
+   * |     1 bit     |             11 bit            |
+   * +---------------+-------------------------------+
+   * | Replica or EC |Replica factor or EC policy ID |
+   * +---------------+-------------------------------+
+   *
+   * BLOCK_LAYOUT_AND_REDUNDANCY format for replicated block:
+   * 0 [11-bit replication]
+   *
+   * BLOCK_LAYOUT_AND_REDUNDANCY format for striped block:
+   * 1 [11-bit ErasureCodingPolicy ID]
    */
   enum HeaderFormat {
     PREFERRED_BLOCK_SIZE(null, 48, 1),
-    REPLICATION(PREFERRED_BLOCK_SIZE.BITS, 11, 0),
-    IS_STRIPED(REPLICATION.BITS, 1, 0),
-    STORAGE_POLICY_ID(IS_STRIPED.BITS, BlockStoragePolicySuite.ID_BIT_LENGTH,
-        0);
+    BLOCK_LAYOUT_AND_REDUNDANCY(PREFERRED_BLOCK_SIZE.BITS,
+        HeaderFormat.LAYOUT_BIT_WIDTH + 11, 0),
+    STORAGE_POLICY_ID(BLOCK_LAYOUT_AND_REDUNDANCY.BITS,
+        BlockStoragePolicySuite.ID_BIT_LENGTH, 0);
 
     private final LongBitFormat BITS;
 
+    /**
+     * Number of bits used to encode block layout type.
+     * Different types can be replica or EC
+     */
+    private static final int LAYOUT_BIT_WIDTH = 1;
+
+    private static final int MAX_REDUNDANCY = (1 << 11) - 1;
+
     HeaderFormat(LongBitFormat previous, int length, long min) {
       BITS = new LongBitFormat(name(), previous, length, min);
     }
 
     static short getReplication(long header) {
-      return (short)REPLICATION.BITS.retrieve(header);
+      long layoutRedundancy = BLOCK_LAYOUT_AND_REDUNDANCY.BITS.retrieve(header);
+      return (short) (layoutRedundancy & MAX_REDUNDANCY);
+    }
+
+    static byte getECPolicyID(long header) {
+      long layoutRedundancy = BLOCK_LAYOUT_AND_REDUNDANCY.BITS.retrieve(header);
+      return (byte) (layoutRedundancy & MAX_REDUNDANCY);
     }
 
     static long getPreferredBlockSize(long header) {
@@ -111,26 +144,27 @@ public class INodeFile extends INodeWithAdditionalFields
     }
 
     static boolean isStriped(long header) {
-      long isStriped = IS_STRIPED.BITS.retrieve(header);
-      Preconditions.checkState(isStriped == 0 || isStriped == 1);
-      return isStriped == 1;
+      long layoutRedundancy = BLOCK_LAYOUT_AND_REDUNDANCY.BITS.retrieve(header);
+      return (layoutRedundancy & (1 << 11)) != 0;
     }
 
     static long toLong(long preferredBlockSize, short replication,
         boolean isStriped, byte storagePolicyID) {
+      Preconditions.checkArgument(replication >= 0 &&
+          replication <= MAX_REDUNDANCY);
       long h = 0;
       if (preferredBlockSize == 0) {
         preferredBlockSize = PREFERRED_BLOCK_SIZE.BITS.getMin();
       }
       h = PREFERRED_BLOCK_SIZE.BITS.combine(preferredBlockSize, h);
-      // Replication factor for striped files is zero
+      // For erasure coded files, replication is used to store ec policy id
+      // TODO: this is hacky. Add some utility to generate the layoutRedundancy
+      long layoutRedundancy = 0;
       if (isStriped) {
-        h = REPLICATION.BITS.combine(0L, h);
-        h = IS_STRIPED.BITS.combine(1L, h);
-      } else {
-        h = REPLICATION.BITS.combine(replication, h);
-        h = IS_STRIPED.BITS.combine(0L, h);
+        layoutRedundancy |= 1 << 11;
       }
+      layoutRedundancy |= replication;
+      h = BLOCK_LAYOUT_AND_REDUNDANCY.BITS.combine(layoutRedundancy, h);
       h = STORAGE_POLICY_ID.BITS.combine(storagePolicyID, h);
       return h;
     }
@@ -401,9 +435,11 @@ public class INodeFile extends INodeWithAdditionalFields
     return HeaderFormat.getReplication(header);
   }
 
-  /** The same as getFileReplication(null). */
+  /**
+   * The same as getFileReplication(null).
+   * For erasure coded files, this returns the EC policy ID.
+   * */
   @Override // INodeFileAttributes
-  // TODO properly handle striped files
   public final short getFileReplication() {
     return getFileReplication(CURRENT_STATE_ID);
   }
@@ -429,7 +465,12 @@ public class INodeFile extends INodeWithAdditionalFields
 
   /** Set the replication factor of this file. */
   private void setFileReplication(short replication) {
-    header = HeaderFormat.REPLICATION.BITS.combine(replication, header);
+    long layoutRedundancy =
+        HeaderFormat.BLOCK_LAYOUT_AND_REDUNDANCY.BITS.retrieve(header);
+    layoutRedundancy = (layoutRedundancy &
+        ~HeaderFormat.MAX_REDUNDANCY) | replication;
+    header = HeaderFormat.BLOCK_LAYOUT_AND_REDUNDANCY.BITS.
+        combine(layoutRedundancy, header);
   }
 
   /** Set the replication factor of this file. */
@@ -474,16 +515,16 @@ public class INodeFile extends INodeWithAdditionalFields
 
 
   /**
-   * @return The ID of the erasure coding policy on the file. 0 represents no
-   *          EC policy (file is in contiguous format). 1 represents the system
-   *          default EC policy:
-   *          {@link ErasureCodingPolicyManager#SYS_DEFAULT_POLICY}.
-   * TODO: support more policies by reusing {@link HeaderFormat#REPLICATION}.
+   * @return The ID of the erasure coding policy on the file. -1 represents no
+   *          EC policy.
    */
   @VisibleForTesting
   @Override
   public byte getErasureCodingPolicyID() {
-    return isStriped() ? (byte)1 : (byte)0;
+    if (isStriped()) {
+      return HeaderFormat.getECPolicyID(header);
+    }
+    return -1;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7600e3c4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
index 8cd481a..36d93c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
@@ -27,26 +27,26 @@ import org.apache.hadoop.hdfs.server.namenode.INodeFile.HeaderFormat;
 @InterfaceAudience.Private
 public interface INodeFileAttributes extends INodeAttributes {
   /** @return the file replication. */
-  public short getFileReplication();
+  short getFileReplication();
 
   /** @return whether the file is striped (instead of contiguous) */
-  public boolean isStriped();
+  boolean isStriped();
 
-  /** @return whether the file is striped (instead of contiguous) */
-  public byte getErasureCodingPolicyID();
+  /** @return the ID of the ErasureCodingPolicy */
+  byte getErasureCodingPolicyID();
 
   /** @return preferred block size in bytes */
-  public long getPreferredBlockSize();
+  long getPreferredBlockSize();
 
   /** @return the header as a long. */
-  public long getHeaderLong();
+  long getHeaderLong();
 
-  public boolean metadataEquals(INodeFileAttributes other);
+  boolean metadataEquals(INodeFileAttributes other);
 
-  public byte getLocalStoragePolicyID();
+  byte getLocalStoragePolicyID();
 
   /** A copy of the inode file attributes */
-  public static class SnapshotCopy extends INodeAttributes.SnapshotCopy
+  static class SnapshotCopy extends INodeAttributes.SnapshotCopy
       implements INodeFileAttributes {
     private final long header;
 
@@ -82,7 +82,10 @@ public interface INodeFileAttributes extends INodeAttributes {
 
     @Override
     public byte getErasureCodingPolicyID() {
-      return isStriped() ? (byte)1 : (byte)0;
+      if (isStriped()) {
+        return HeaderFormat.getECPolicyID(header);
+      }
+      return -1;
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7600e3c4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
index 1328e3e..b7c3ed8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
@@ -35,6 +35,7 @@ import org.junit.Test;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.Collection;
 
 import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
@@ -225,8 +226,11 @@ public class TestErasureCodingPolicies {
     fs.create(fooFile, FsPermission.getFileDefault(), true,
         conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
         (short)0, fs.getDefaultBlockSize(fooFile), null);
+    ErasureCodingPolicy policy = fs.getErasureCodingPolicy(fooFile);
     // set replication should be a no-op
     fs.setReplication(fooFile, (short) 3);
+    // should preserve the policy after set replication
+    assertEquals(policy, fs.getErasureCodingPolicy(fooFile));
   }
 
   @Test(timeout = 60000)
@@ -247,9 +251,10 @@ public class TestErasureCodingPolicies {
 
   @Test(timeout = 60000)
   public void testGetErasureCodingPolicy() throws Exception {
-    ErasureCodingPolicy[] sysECPolicies = ErasureCodingPolicyManager.getSystemPolices();
-    assertTrue("System ecPolicies should be of only 1 for now",
-        sysECPolicies.length == 1);
+    ErasureCodingPolicy[] sysECPolicies =
+        ErasureCodingPolicyManager.getSystemPolicies();
+    assertTrue("System ecPolicies should exist",
+        sysECPolicies.length > 0);
 
     ErasureCodingPolicy usingECPolicy = sysECPolicies[0];
     String src = "/ec2";
@@ -281,7 +286,7 @@ public class TestErasureCodingPolicies {
     String policyName = "RS-4-2-128k";
     int cellSize = 128 * 1024;
     ErasureCodingPolicy ecPolicy=
-        new ErasureCodingPolicy(policyName,rsSchema,cellSize);
+        new ErasureCodingPolicy(policyName, rsSchema, cellSize, (byte) -1);
     String src = "/ecDir4-2";
     final Path ecDir = new Path(src);
     try {
@@ -298,16 +303,11 @@ public class TestErasureCodingPolicies {
   @Test(timeout = 60000)
   public void testGetAllErasureCodingPolicies() throws Exception {
     ErasureCodingPolicy[] sysECPolicies = ErasureCodingPolicyManager
-        .getSystemPolices();
-    assertTrue("System ecPolicies should be of only 1 for now",
-        sysECPolicies.length == 1);
-
+        .getSystemPolicies();
     Collection<ErasureCodingPolicy> allECPolicies = fs
         .getAllErasureCodingPolicies();
-    assertTrue("All ecPolicies should be of only 1 for now",
-        allECPolicies.size() == 1);
-    assertEquals("Erasure coding policy mismatches",
-        sysECPolicies[0], allECPolicies.iterator().next());
+    assertTrue("All system policies should be active",
+        allECPolicies.containsAll(Arrays.asList(sysECPolicies)));
   }
 
   @Test(timeout = 60000)
@@ -329,4 +329,23 @@ public class TestErasureCodingPolicies {
       assertExceptionContains("Path not found: " + path, e);
     }
   }
+
+  @Test(timeout = 60000)
+  public void testMultiplePoliciesCoExist() throws Exception {
+    ErasureCodingPolicy[] sysPolicies =
+        ErasureCodingPolicyManager.getSystemPolicies();
+    if (sysPolicies.length > 1) {
+      for (ErasureCodingPolicy policy : sysPolicies) {
+        Path dir = new Path("/policy_" + policy.getId());
+        fs.mkdir(dir, FsPermission.getDefault());
+        fs.setErasureCodingPolicy(dir, policy);
+        Path file = new Path(dir, "child");
+        fs.create(file).close();
+        assertEquals(policy, fs.getErasureCodingPolicy(file));
+        assertEquals(policy, fs.getErasureCodingPolicy(dir));
+        INode iNode = namesystem.getFSDirectory().getINode(file.toString());
+        assertEquals(policy.getId(), iNode.asFile().getFileReplication());
+      }
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7600e3c4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
index d269a9d..152e153 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
@@ -58,10 +58,35 @@ public class TestReconstructStripedBlocksWithRackAwareness {
     GenericTestUtils.setLogLevel(BlockManager.LOG, Level.ALL);
   }
 
-  private static final String[] hosts = new String[]{"host1", "host2", "host3",
-      "host4", "host5", "host6", "host7", "host8", "host9", "host10"};
-  private static final String[] racks = new String[]{"/r1", "/r1", "/r2", "/r2",
-      "/r3", "/r3", "/r4", "/r4", "/r5", "/r6"};
+  private static final String[] hosts = getHosts();
+  private static final String[] racks = getRacks();
+
+  private static String[] getHosts() {
+    String[] hosts = new String[NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 1];
+    for (int i = 0; i < hosts.length; i++) {
+      hosts[i] = "host" + (i + 1);
+    }
+    return hosts;
+  }
+
+  private static String[] getRacks() {
+    String[] racks = new String[NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 1];
+    int numHostEachRack = (NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS - 1) /
+        (NUM_DATA_BLOCKS - 1) + 1;
+    int j = 0;
+    // we have NUM_DATA_BLOCKS racks
+    for (int i = 1; i <= NUM_DATA_BLOCKS; i++) {
+      if (j == racks.length - 1) {
+        assert i == NUM_DATA_BLOCKS;
+        racks[j++] = "/r" + i;
+      } else {
+        for (int k = 0; k < numHostEachRack && j < racks.length - 1; k++) {
+          racks[j++] = "/r" + i;
+        }
+      }
+    }
+    return racks;
+  }
 
   private MiniDFSCluster cluster;
   private DistributedFileSystem fs;
@@ -118,7 +143,8 @@ public class TestReconstructStripedBlocksWithRackAwareness {
    */
   @Test
   public void testReconstructForNotEnoughRacks() throws Exception {
-    MiniDFSCluster.DataNodeProperties host10 = stopDataNode("host10");
+    MiniDFSCluster.DataNodeProperties lastHost = stopDataNode(
+        hosts[hosts.length - 1]);
 
     final Path file = new Path("/foo");
     // the file's block is in 9 dn but 5 racks
@@ -135,16 +161,16 @@ public class TestReconstructStripedBlocksWithRackAwareness {
     for (DatanodeStorageInfo storage : blockInfo.storages) {
       rackSet.add(storage.getDatanodeDescriptor().getNetworkLocation());
     }
-    Assert.assertEquals(5, rackSet.size());
+    Assert.assertEquals(NUM_DATA_BLOCKS - 1, rackSet.size());
 
     // restart the stopped datanode
-    cluster.restartDataNode(host10);
+    cluster.restartDataNode(lastHost);
     cluster.waitActive();
 
     // make sure we have 6 racks again
     NetworkTopology topology = bm.getDatanodeManager().getNetworkTopology();
     Assert.assertEquals(hosts.length, topology.getNumOfLeaves());
-    Assert.assertEquals(6, topology.getNumOfRacks());
+    Assert.assertEquals(NUM_DATA_BLOCKS, topology.getNumOfRacks());
 
     // pause all the heartbeats
     for (DataNode dn : cluster.getDataNodes()) {
@@ -180,7 +206,8 @@ public class TestReconstructStripedBlocksWithRackAwareness {
 
   @Test
   public void testChooseExcessReplicasToDelete() throws Exception {
-    MiniDFSCluster.DataNodeProperties host10 = stopDataNode("host10");
+    MiniDFSCluster.DataNodeProperties lastHost = stopDataNode(
+        hosts[hosts.length - 1]);
 
     final Path file = new Path("/foo");
     DFSTestUtil.createFile(fs, file,
@@ -188,8 +215,8 @@ public class TestReconstructStripedBlocksWithRackAwareness {
 
     // stop host1
     MiniDFSCluster.DataNodeProperties host1 = stopDataNode("host1");
-    // bring host10 back
-    cluster.restartDataNode(host10);
+    // bring last host back
+    cluster.restartDataNode(lastHost);
     cluster.waitActive();
 
     // wait for reconstruction to finish

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7600e3c4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
index ef4b2c6..7e2bc0a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
@@ -777,8 +777,9 @@ public class TestBlockRecovery {
 
   @Test
   public void testSafeLength() throws Exception {
+    // hard coded policy to work with hard coded test suite
     ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager
-        .getSystemDefaultPolicy();
+        .getSystemPolicies()[0];
     RecoveringStripedBlock rBlockStriped = new RecoveringStripedBlock(rBlock,
         new byte[9], ecPolicy);
     BlockRecoveryWorker recoveryWorker = new BlockRecoveryWorker(dn);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7600e3c4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
index 645f6fa..0d8431d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
@@ -151,7 +151,7 @@ public class TestFSImage {
     long mtime = 1426222916-3600;
     long atime = 1426222916;
     BlockInfoContiguous[] blks = new BlockInfoContiguous[0];
-    short replication = 3;
+    short replication = testECPolicy.getId();
     long preferredBlockSize = 128*1024*1024;
     INodeFile file = new INodeFile(id, name, permissionStatus, mtime, atime,
         blks, replication, preferredBlockSize, (byte) 0, true);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7600e3c4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
index 2daadb4..0d15467 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
@@ -61,8 +60,9 @@ public class TestStripedINodeFile {
   private final BlockStoragePolicy defaultPolicy =
       defaultSuite.getDefaultPolicy();
 
+  // use hard coded policy - see HDFS-9816
   private static final ErasureCodingPolicy testECPolicy
-      = ErasureCodingPolicyManager.getSystemDefaultPolicy();
+      = ErasureCodingPolicyManager.getSystemPolicies()[0];
 
   @Rule
   public Timeout globalTimeout = new Timeout(300000);
@@ -228,8 +228,8 @@ public class TestStripedINodeFile {
       final Path contiguousFile = new Path(parentDir, "someFile");
       final DistributedFileSystem dfs;
       final Configuration conf = new Configuration();
-      final short GROUP_SIZE = (short) (StripedFileTestUtil.NUM_DATA_BLOCKS
-          + StripedFileTestUtil.NUM_PARITY_BLOCKS);
+      final short GROUP_SIZE = (short) (testECPolicy.getNumDataUnits() +
+          testECPolicy.getNumParityUnits());
       conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 2);
 
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7600e3c4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java
index 889a7d6..96fc79c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java
@@ -79,15 +79,16 @@ import static org.junit.Assert.assertFalse;
  * TODO: test parity block logic
  */
 public class TestStripedBlockUtil {
-  private final short DATA_BLK_NUM = StripedFileTestUtil.NUM_DATA_BLOCKS;
-  private final short PARITY_BLK_NUM = StripedFileTestUtil.NUM_PARITY_BLOCKS;
+  // use hard coded policy - see HDFS-9816
+  private final ErasureCodingPolicy EC_POLICY =
+      ErasureCodingPolicyManager.getSystemPolicies()[0];
+  private final short DATA_BLK_NUM = (short) EC_POLICY.getNumDataUnits();
+  private final short PARITY_BLK_NUM = (short) EC_POLICY.getNumParityUnits();
   private final short BLK_GROUP_WIDTH = (short) (DATA_BLK_NUM + PARITY_BLK_NUM);
   private final int CELLSIZE = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
   private final int FULL_STRIPE_SIZE = DATA_BLK_NUM * CELLSIZE;
   /** number of full stripes in a full block group */
   private final int BLK_GROUP_STRIPE_NUM = 16;
-  private final ErasureCodingPolicy ECPOLICY = ErasureCodingPolicyManager.
-      getSystemDefaultPolicy();
   private final Random random = new Random();
 
   private int[] blockGroupSizes;
@@ -157,7 +158,7 @@ public class TestStripedBlockUtil {
     int done = 0;
     while (done < bgSize) {
       Preconditions.checkState(done % CELLSIZE == 0);
-      StripingCell cell = new StripingCell(ECPOLICY, CELLSIZE, done / CELLSIZE, 0);
+      StripingCell cell = new StripingCell(EC_POLICY, CELLSIZE, done / CELLSIZE, 0);
       int idxInStripe = cell.idxInStripe;
       int size = Math.min(CELLSIZE, bgSize - done);
       for (int i = 0; i < size; i++) {
@@ -250,7 +251,7 @@ public class TestStripedBlockUtil {
           if (brStart + brSize > bgSize) {
             continue;
           }
-          AlignedStripe[] stripes = divideByteRangeIntoStripes(ECPOLICY,
+          AlignedStripe[] stripes = divideByteRangeIntoStripes(EC_POLICY,
               CELLSIZE, blockGroup, brStart, brStart + brSize - 1, assembled, 0);
 
           for (AlignedStripe stripe : stripes) {


[03/34] hadoop git commit: YARN-4761. NMs reconnecting with changed capabilities can lead to wrong cluster resource calculations on fair scheduler. Contributed by Sangjin Lee

Posted by ar...@apache.org.
YARN-4761. NMs reconnecting with changed capabilities can lead to wrong cluster resource calculations on fair scheduler. Contributed by Sangjin Lee


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e1ccc962
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e1ccc962
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e1ccc962

Branch: refs/heads/HDFS-1312
Commit: e1ccc9622b2f1fbefea1862fa74d1fb56d8eb264
Parents: 19ee185
Author: Zhihai Xu <zx...@apache.org>
Authored: Sun Mar 6 19:46:09 2016 -0800
Committer: Zhihai Xu <zx...@apache.org>
Committed: Sun Mar 6 19:46:09 2016 -0800

----------------------------------------------------------------------
 .../scheduler/fair/FairScheduler.java           |   4 +-
 .../scheduler/TestAbstractYarnScheduler.java    | 132 +++++++++++++++++++
 .../capacity/TestCapacityScheduler.java         | 110 ----------------
 3 files changed, 134 insertions(+), 112 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1ccc962/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 2801bee..917fc8a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -889,7 +889,7 @@ public class FairScheduler extends
     } else {
       nodesPerRack.put(rackName, 1);
     }
-    Resources.addTo(clusterResource, node.getTotalCapability());
+    Resources.addTo(clusterResource, schedulerNode.getTotalResource());
     updateMaximumAllocation(schedulerNode, true);
 
     triggerUpdate();
@@ -909,7 +909,7 @@ public class FairScheduler extends
     if (node == null) {
       return;
     }
-    Resources.subtractFrom(clusterResource, rmNode.getTotalCapability());
+    Resources.subtractFrom(clusterResource, node.getTotalResource());
     updateRootQueueMetrics();
 
     triggerUpdate();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1ccc962/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java
index 8411a4d..e7ba58d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java
@@ -28,6 +28,8 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.service.Service;
 import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.Container;
@@ -38,11 +40,24 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceOption;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.DrainDispatcher;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.NMLivelinessMonitor;
+import org.apache.hadoop.yarn.server.resourcemanager.NodesListManager;
 import org.apache.hadoop.yarn.server.resourcemanager.ParameterizedSchedulerTestBase;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceTrackerService;
+import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.MockRMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
@@ -51,12 +66,19 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM;
+import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.junit.Assert;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 @SuppressWarnings("unchecked")
 public class TestAbstractYarnScheduler extends ParameterizedSchedulerTestBase {
@@ -615,4 +637,114 @@ public class TestAbstractYarnScheduler extends ParameterizedSchedulerTestBase {
     Assert.assertEquals(expectedMaximumResource.getVirtualCores(),
         schedulerMaximumResourceCapability.getVirtualCores());
   }
+
+  private class SleepHandler implements EventHandler<SchedulerEvent> {
+    boolean sleepFlag = false;
+    int sleepTime = 20;
+    @Override
+    public void handle(SchedulerEvent event) {
+      try {
+        if (sleepFlag) {
+          Thread.sleep(sleepTime);
+        }
+      } catch(InterruptedException ie) {
+      }
+    }
+  }
+
+  private ResourceTrackerService getPrivateResourceTrackerService(
+      Dispatcher privateDispatcher, ResourceManager rm,
+      SleepHandler sleepHandler) {
+    Configuration conf = getConf();
+
+    RMContext privateContext =
+        new RMContextImpl(privateDispatcher, null, null, null, null, null, null,
+            null, null, null);
+    privateContext.setNodeLabelManager(Mockito.mock(RMNodeLabelsManager.class));
+
+    privateDispatcher.register(SchedulerEventType.class, sleepHandler);
+    privateDispatcher.register(SchedulerEventType.class,
+        rm.getResourceScheduler());
+    privateDispatcher.register(RMNodeEventType.class,
+        new ResourceManager.NodeEventDispatcher(privateContext));
+    ((Service) privateDispatcher).init(conf);
+    ((Service) privateDispatcher).start();
+    NMLivelinessMonitor nmLivelinessMonitor =
+        new NMLivelinessMonitor(privateDispatcher);
+    nmLivelinessMonitor.init(conf);
+    nmLivelinessMonitor.start();
+    NodesListManager nodesListManager = new NodesListManager(privateContext);
+    nodesListManager.init(conf);
+    RMContainerTokenSecretManager containerTokenSecretManager =
+        new RMContainerTokenSecretManager(conf);
+    containerTokenSecretManager.start();
+    NMTokenSecretManagerInRM nmTokenSecretManager =
+        new NMTokenSecretManagerInRM(conf);
+    nmTokenSecretManager.start();
+    ResourceTrackerService privateResourceTrackerService =
+        new ResourceTrackerService(privateContext, nodesListManager,
+            nmLivelinessMonitor, containerTokenSecretManager,
+            nmTokenSecretManager);
+    privateResourceTrackerService.init(conf);
+    privateResourceTrackerService.start();
+    rm.getResourceScheduler().setRMContext(privateContext);
+    return privateResourceTrackerService;
+  }
+
+  /**
+   * Test the behavior of the scheduler when a node reconnects
+   * with changed capabilities. This test is to catch any race conditions
+   * that might occur due to the use of the RMNode object.
+   * @throws Exception
+   */
+  @Test(timeout = 60000)
+  public void testNodemanagerReconnect() throws Exception {
+    configureScheduler();
+    Configuration conf = getConf();
+    MockRM rm = new MockRM(conf);
+    try {
+      rm.start();
+
+      conf.setBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY, false);
+      DrainDispatcher privateDispatcher = new DrainDispatcher();
+      SleepHandler sleepHandler = new SleepHandler();
+      ResourceTrackerService privateResourceTrackerService =
+          getPrivateResourceTrackerService(privateDispatcher, rm, sleepHandler);
+
+      // Register node1
+      String hostname1 = "localhost1";
+      Resource capability = BuilderUtils.newResource(4096, 4);
+      RecordFactory recordFactory =
+          RecordFactoryProvider.getRecordFactory(null);
+
+      RegisterNodeManagerRequest request1 =
+          recordFactory.newRecordInstance(RegisterNodeManagerRequest.class);
+      NodeId nodeId1 = NodeId.newInstance(hostname1, 0);
+      request1.setNodeId(nodeId1);
+      request1.setHttpPort(0);
+      request1.setResource(capability);
+      privateResourceTrackerService.registerNodeManager(request1);
+      privateDispatcher.await();
+      Resource clusterResource =
+          rm.getResourceScheduler().getClusterResource();
+      Assert.assertEquals("Initial cluster resources don't match", capability,
+          clusterResource);
+
+      Resource newCapability = BuilderUtils.newResource(1024, 1);
+      RegisterNodeManagerRequest request2 =
+          recordFactory.newRecordInstance(RegisterNodeManagerRequest.class);
+      request2.setNodeId(nodeId1);
+      request2.setHttpPort(0);
+      request2.setResource(newCapability);
+      // hold up the dispatcher and register the same node with lower capability
+      sleepHandler.sleepFlag = true;
+      privateResourceTrackerService.registerNodeManager(request2);
+      privateDispatcher.await();
+      Assert.assertEquals("Cluster resources don't match", newCapability,
+          rm.getResourceScheduler().getClusterResource());
+      privateResourceTrackerService.stop();
+    } finally {
+      rm.stop();
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1ccc962/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index c8c97e9..b6c005b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -46,7 +46,6 @@ import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.service.Service;
 import org.apache.hadoop.yarn.LocalConfigurationProvider;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
@@ -74,7 +73,6 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.event.Dispatcher;
-import org.apache.hadoop.yarn.event.DrainDispatcher;
 import org.apache.hadoop.yarn.event.Event;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -82,7 +80,6 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
-import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.AdminService;
 import org.apache.hadoop.yarn.server.resourcemanager.Application;
@@ -90,13 +87,10 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
-import org.apache.hadoop.yarn.server.resourcemanager.NMLivelinessMonitor;
 import org.apache.hadoop.yarn.server.resourcemanager.NodeManager;
-import org.apache.hadoop.yarn.server.resourcemanager.NodesListManager;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
-import org.apache.hadoop.yarn.server.resourcemanager.ResourceTrackerService;
 import org.apache.hadoop.yarn.server.resourcemanager.Task;
 import org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization.MockRMWithAMS;
 import org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization.MyContainerManager;
@@ -116,7 +110,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEven
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeResourceUpdateEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
@@ -136,7 +129,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSc
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.FairOrderingPolicy;
 import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM;
 import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM;
@@ -3285,108 +3277,6 @@ public class TestCapacityScheduler {
     }
   }
 
-  private class SleepHandler implements EventHandler<SchedulerEvent> {
-    boolean sleepFlag = false;
-    int sleepTime = 20;
-    @Override
-    public void handle(SchedulerEvent event) {
-      try {
-        if(sleepFlag) {
-          Thread.sleep(sleepTime);
-        }
-      }
-      catch(InterruptedException ie) {
-      }
-    }
-  }
-
-  private ResourceTrackerService getPrivateResourceTrackerService(
-      Dispatcher privateDispatcher, SleepHandler sleepHandler) {
-
-    Configuration conf = new Configuration();
-    ResourceTrackerService privateResourceTrackerService;
-
-    RMContext privateContext =
-        new RMContextImpl(privateDispatcher, null, null, null, null, null, null,
-            null, null, null);
-    privateContext.setNodeLabelManager(Mockito.mock(RMNodeLabelsManager.class));
-
-    privateDispatcher.register(SchedulerEventType.class, sleepHandler);
-    privateDispatcher.register(SchedulerEventType.class,
-        resourceManager.getResourceScheduler());
-    privateDispatcher.register(RMNodeEventType.class,
-        new ResourceManager.NodeEventDispatcher(privateContext));
-    ((Service) privateDispatcher).init(conf);
-    ((Service) privateDispatcher).start();
-    NMLivelinessMonitor nmLivelinessMonitor =
-        new NMLivelinessMonitor(privateDispatcher);
-    nmLivelinessMonitor.init(conf);
-    nmLivelinessMonitor.start();
-    NodesListManager nodesListManager = new NodesListManager(privateContext);
-    nodesListManager.init(conf);
-    RMContainerTokenSecretManager containerTokenSecretManager =
-        new RMContainerTokenSecretManager(conf);
-    containerTokenSecretManager.start();
-    NMTokenSecretManagerInRM nmTokenSecretManager =
-        new NMTokenSecretManagerInRM(conf);
-    nmTokenSecretManager.start();
-    privateResourceTrackerService =
-        new ResourceTrackerService(privateContext, nodesListManager,
-            nmLivelinessMonitor, containerTokenSecretManager,
-            nmTokenSecretManager);
-    privateResourceTrackerService.init(conf);
-    privateResourceTrackerService.start();
-    resourceManager.getResourceScheduler().setRMContext(privateContext);
-    return privateResourceTrackerService;
-  }
-
-  /**
-   * Test the behaviour of the capacity scheduler when a node reconnects
-   * with changed capabilities. This test is to catch any race conditions
-   * that might occur due to the use of the RMNode object.
-   * @throws Exception
-   */
-  @Test
-  public void testNodemanagerReconnect() throws Exception {
-
-    DrainDispatcher privateDispatcher = new DrainDispatcher();
-    SleepHandler sleepHandler = new SleepHandler();
-    ResourceTrackerService privateResourceTrackerService =
-        getPrivateResourceTrackerService(privateDispatcher,
-            sleepHandler);
-
-    // Register node1
-    String hostname1 = "localhost1";
-    Resource capability = BuilderUtils.newResource(4096, 4);
-    RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
-
-    RegisterNodeManagerRequest request1 =
-        recordFactory.newRecordInstance(RegisterNodeManagerRequest.class);
-    NodeId nodeId1 = NodeId.newInstance(hostname1, 0);
-    request1.setNodeId(nodeId1);
-    request1.setHttpPort(0);
-    request1.setResource(capability);
-    privateResourceTrackerService.registerNodeManager(request1);
-    privateDispatcher.await();
-    Resource clusterResource = resourceManager.getResourceScheduler().getClusterResource();
-    Assert.assertEquals("Initial cluster resources don't match", capability,
-        clusterResource);
-
-    Resource newCapability = BuilderUtils.newResource(1024, 1);
-    RegisterNodeManagerRequest request2 =
-        recordFactory.newRecordInstance(RegisterNodeManagerRequest.class);
-    request2.setNodeId(nodeId1);
-    request2.setHttpPort(0);
-    request2.setResource(newCapability);
-    // hold up the disaptcher and register the same node with lower capability
-    sleepHandler.sleepFlag = true;
-    privateResourceTrackerService.registerNodeManager(request2);
-    privateDispatcher.await();
-    Assert.assertEquals("Cluster resources don't match", newCapability,
-        resourceManager.getResourceScheduler().getClusterResource());
-    privateResourceTrackerService.stop();
-  }
-
   @Test
   public void testResourceUpdateDecommissioningNode() throws Exception {
     // Mock the RMNodeResourceUpdate event handler to update SchedulerNode


[29/34] hadoop git commit: HADOOP-12905. Clean up CHANGES.txt RAT exclusions from pom.xml files. Contributed by Chris Nauroth.

Posted by ar...@apache.org.
HADOOP-12905. Clean up CHANGES.txt RAT exclusions from pom.xml files. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89b16d27
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89b16d27
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89b16d27

Branch: refs/heads/HDFS-1312
Commit: 89b16d27e26216fc4e5153683105b323c310a0af
Parents: 55f73a1
Author: Chris Nauroth <cn...@apache.org>
Authored: Tue Mar 8 21:22:06 2016 -0800
Committer: Chris Nauroth <cn...@apache.org>
Committed: Tue Mar 8 21:22:06 2016 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/pom.xml | 1 -
 hadoop-hdfs-project/hadoop-hdfs/pom.xml     | 3 ---
 hadoop-mapreduce-project/pom.xml            | 1 -
 hadoop-yarn-project/pom.xml                 | 9 ---------
 4 files changed, 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/89b16d27/hadoop-common-project/hadoop-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index 503f312..8d41f28 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -478,7 +478,6 @@
         <artifactId>apache-rat-plugin</artifactId>
         <configuration>
           <excludes>
-            <exclude>CHANGES.txt</exclude>
             <exclude>.idea/**</exclude>
             <exclude>src/main/conf/*</exclude>
             <exclude>dev-support/jdiff/**</exclude>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89b16d27/hadoop-hdfs-project/hadoop-hdfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 32c060d..4e1901b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -370,9 +370,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
         <artifactId>apache-rat-plugin</artifactId>
         <configuration>
           <excludes>
-            <exclude>CHANGES.txt</exclude>
-            <exclude>CHANGES.HDFS-1623.txt</exclude>
-            <exclude>CHANGES.HDFS-347.txt</exclude>
             <exclude>.gitattributes</exclude>
             <exclude>.idea/**</exclude>
             <exclude>src/main/conf/*</exclude>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89b16d27/hadoop-mapreduce-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/pom.xml b/hadoop-mapreduce-project/pom.xml
index 0cbd928..637de73 100644
--- a/hadoop-mapreduce-project/pom.xml
+++ b/hadoop-mapreduce-project/pom.xml
@@ -194,7 +194,6 @@
         <configuration>
           <excludes>
             <exclude>.eclipse.templates/</exclude>
-            <exclude>CHANGES.*txt</exclude>
             <exclude>lib/jdiff/**</exclude>
           </excludes>
         </configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89b16d27/hadoop-yarn-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/pom.xml b/hadoop-yarn-project/pom.xml
index e21e0b8..48dce20 100644
--- a/hadoop-yarn-project/pom.xml
+++ b/hadoop-yarn-project/pom.xml
@@ -104,15 +104,6 @@
           </execution>
         </executions>
       </plugin>
-      <plugin>
-        <groupId>org.apache.rat</groupId>
-        <artifactId>apache-rat-plugin</artifactId>
-        <configuration>
-          <excludes>
-            <exclude>CHANGES.txt</exclude>
-          </excludes>
-        </configuration>
-      </plugin>
     </plugins>
   </build>
 


[02/34] hadoop git commit: YARN-4763. RMApps Page crashes with NPE. (Bibin A Chundatt via rohithsharmaks)

Posted by ar...@apache.org.
YARN-4763. RMApps Page crashes with NPE. (Bibin A Chundatt via rohithsharmaks)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19ee1859
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19ee1859
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19ee1859

Branch: refs/heads/HDFS-1312
Commit: 19ee1859071509bba9ecd0a8a7dc6a47e2979c88
Parents: c50aad0
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Sat Mar 5 12:38:15 2016 +0530
Committer: Rohith Sharma K S <ro...@apache.org>
Committed: Sat Mar 5 13:02:57 2016 +0530

----------------------------------------------------------------------
 .../yarn/server/resourcemanager/webapp/RMAppsBlock.java     | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/19ee1859/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java
index 24a40c1..b4d7921 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.webapp.AppsBlock;
 import org.apache.hadoop.yarn.server.webapp.dao.AppInfo;
 import org.apache.hadoop.yarn.util.ConverterUtils;
@@ -95,9 +96,11 @@ public class RMAppsBlock extends AppsBlock {
       }
 
       String blacklistedNodesCount = "N/A";
-      Set<String> nodes = rm.getRMContext().getRMApps()
-          .get(appAttemptId.getApplicationId()).getAppAttempts()
-          .get(appAttemptId).getBlacklistedNodes();
+      RMAppAttempt appAttempt =
+          rm.getRMContext().getRMApps().get(appAttemptId.getApplicationId())
+              .getAppAttempts().get(appAttemptId);
+      Set<String> nodes =
+          null == appAttempt ? null : appAttempt.getBlacklistedNodes();
       if (nodes != null) {
         blacklistedNodesCount = String.valueOf(nodes.size());
       }


[22/34] hadoop git commit: YARN-4465. SchedulerUtils#validateRequest for Label check should happen only when nodelabel enabled. (Bibin A Chundatt via wangda)

Posted by ar...@apache.org.
YARN-4465. SchedulerUtils#validateRequest for Label check should happen only when nodelabel enabled. (Bibin A Chundatt via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0233d4e0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0233d4e0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0233d4e0

Branch: refs/heads/HDFS-1312
Commit: 0233d4e0ee9947a95c018b1539310fc0bff6c44e
Parents: a3cc6e2
Author: Wangda Tan <wa...@apache.org>
Authored: Tue Mar 8 14:27:03 2016 -0800
Committer: Wangda Tan <wa...@apache.org>
Committed: Tue Mar 8 14:27:03 2016 -0800

----------------------------------------------------------------------
 .../scheduler/SchedulerUtils.java               | 56 +++++++++++++-------
 .../scheduler/TestSchedulerUtils.java           | 34 +++++++++++-
 2 files changed, 70 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0233d4e0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
index a80e921..b460964 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
@@ -24,6 +24,7 @@ import java.util.Set;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerState;
@@ -32,6 +33,7 @@ import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.InvalidLabelResourceRequestException;
 import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
@@ -222,6 +224,17 @@ public class SchedulerUtils {
       Resource maximumResource, String queueName, YarnScheduler scheduler,
       boolean isRecovery, RMContext rmContext, QueueInfo queueInfo)
       throws InvalidResourceRequestException {
+    Configuration conf = rmContext.getYarnConfiguration();
+    // If Node label is not enabled throw exception
+    if (null != conf && !YarnConfiguration.areNodeLabelsEnabled(conf)) {
+      String labelExp = resReq.getNodeLabelExpression();
+      if (!(RMNodeLabelsManager.NO_LABEL.equals(labelExp)
+          || null == labelExp)) {
+        throw new InvalidLabelResourceRequestException(
+            "Invalid resource request, node label not enabled "
+                + "but request contains label expression");
+      }
+    }
     if (null == queueInfo) {
       try {
         queueInfo = scheduler.getQueueInfo(queueName, false, false);
@@ -283,8 +296,8 @@ public class SchedulerUtils {
     // we don't allow specify label expression other than resourceName=ANY now
     if (!ResourceRequest.ANY.equals(resReq.getResourceName())
         && labelExp != null && !labelExp.trim().isEmpty()) {
-      throw new InvalidResourceRequestException(
-          "Invailid resource request, queue=" + queueInfo.getQueueName()
+      throw new InvalidLabelResourceRequestException(
+          "Invalid resource request, queue=" + queueInfo.getQueueName()
               + " specified node label expression in a "
               + "resource request has resource name = "
               + resReq.getResourceName());
@@ -303,15 +316,28 @@ public class SchedulerUtils {
       if (!checkQueueLabelExpression(queueInfo.getAccessibleNodeLabels(),
           labelExp, rmContext)) {
         throw new InvalidLabelResourceRequestException(
-            "Invalid resource request"
-            + ", queue="
-            + queueInfo.getQueueName()
-            + " doesn't have permission to access all labels "
-            + "in resource request. labelExpression of resource request="
-            + labelExp
-            + ". Queue labels="
-            + (queueInfo.getAccessibleNodeLabels() == null ? "" : StringUtils.join(queueInfo
-                .getAccessibleNodeLabels().iterator(), ',')));
+            "Invalid resource request" + ", queue=" + queueInfo.getQueueName()
+                + " doesn't have permission to access all labels "
+                + "in resource request. labelExpression of resource request="
+                + labelExp + ". Queue labels="
+                + (queueInfo.getAccessibleNodeLabels() == null ? ""
+                    : StringUtils.join(
+                        queueInfo.getAccessibleNodeLabels().iterator(), ',')));
+      } else {
+        checkQueueLabelInLabelManager(labelExp, rmContext);
+      }
+    }
+  }
+
+  private static void checkQueueLabelInLabelManager(String labelExpression,
+      RMContext rmContext) throws InvalidLabelResourceRequestException {
+    // check node label manager contains this label
+    if (null != rmContext) {
+      RMNodeLabelsManager nlm = rmContext.getNodeLabelManager();
+      if (nlm != null && !nlm.containsNodeLabel(labelExpression)) {
+        throw new InvalidLabelResourceRequestException(
+            "Invalid label resource request, cluster do not contain "
+                + ", label= " + labelExpression);
       }
     }
   }
@@ -338,14 +364,6 @@ public class SchedulerUtils {
             return false;
           }
         }
-        
-        // check node label manager contains this label
-        if (null != rmContext) {
-          RMNodeLabelsManager nlm = rmContext.getNodeLabelManager();
-          if (nlm != null && !nlm.containsNodeLabel(str)) {
-            return false;
-          }
-        }
       }
     }
     return true;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0233d4e0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
index 0e84d38..3208819 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
@@ -95,7 +95,8 @@ public class TestSchedulerUtils {
   private static final Log LOG = LogFactory.getLog(TestSchedulerUtils.class);
   
   private RMContext rmContext = getMockRMContext();
-  
+  private static YarnConfiguration conf = new YarnConfiguration();
+
   @Test (timeout = 30000)
   public void testNormalizeRequest() {
     ResourceCalculator resourceCalculator = new DefaultResourceCalculator();
@@ -464,6 +465,34 @@ public class TestSchedulerUtils {
       rmContext.getNodeLabelManager().removeFromClusterNodeLabels(
           Arrays.asList("x"));
     }
+    try {
+      Resource resource = Resources.createResource(0,
+          YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
+      ResourceRequest resReq1 = BuilderUtils
+          .newResourceRequest(mock(Priority.class), "*", resource, 1, "x");
+      SchedulerUtils.normalizeAndvalidateRequest(resReq1, maxResource, "queue",
+          scheduler, rmContext);
+      fail("Should fail");
+    } catch (InvalidResourceRequestException e) {
+      assertEquals("Invalid label resource request, cluster do not contain , "
+          + "label= x", e.getMessage());
+    }
+
+    try {
+      rmContext.getYarnConfiguration()
+          .set(YarnConfiguration.NODE_LABELS_ENABLED, "false");
+      Resource resource = Resources.createResource(0,
+          YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
+      ResourceRequest resReq1 = BuilderUtils
+          .newResourceRequest(mock(Priority.class), "*", resource, 1, "x");
+      SchedulerUtils.normalizeAndvalidateRequest(resReq1, maxResource, "queue",
+          scheduler, rmContext);
+      Assert.assertEquals(RMNodeLabelsManager.NO_LABEL,
+          resReq1.getNodeLabelExpression());
+    } catch (InvalidResourceRequestException e) {
+      assertEquals("Invalid resource request, node label not enabled but "
+          + "request contains label expression", e.getMessage());
+    }
   }
 
   @Test (timeout = 30000)
@@ -773,6 +802,9 @@ public class TestSchedulerUtils {
     RMContext rmContext = mock(RMContext.class);
     RMNodeLabelsManager nlm = new NullRMNodeLabelsManager();
     nlm.init(new Configuration(false));
+    when(rmContext.getYarnConfiguration()).thenReturn(conf);
+    rmContext.getYarnConfiguration().set(YarnConfiguration.NODE_LABELS_ENABLED,
+        "true");
     when(rmContext.getNodeLabelManager()).thenReturn(nlm);
     return rmContext;
   }


[05/34] hadoop git commit: YARN-4737. Add CSRF filter support in YARN. Contributed by Jonathan Maron.

Posted by ar...@apache.org.
YARN-4737. Add CSRF filter support in YARN. Contributed by Jonathan Maron.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e51a8c10
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e51a8c10
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e51a8c10

Branch: refs/heads/HDFS-1312
Commit: e51a8c10560e5db5cf01fd530af48825cb51c9ea
Parents: 4f9fe3a
Author: Varun Vasudev <vv...@apache.org>
Authored: Mon Mar 7 15:16:35 2016 +0530
Committer: Varun Vasudev <vv...@apache.org>
Committed: Mon Mar 7 15:26:44 2016 +0530

----------------------------------------------------------------------
 .../mapreduce/v2/jobhistory/JHAdminConfig.java  |  13 ++
 .../src/main/resources/mapred-default.xml       |  26 +++
 .../mapreduce/v2/hs/HistoryClientService.java   |   1 +
 .../hadoop/yarn/conf/YarnConfiguration.java     |  24 ++
 .../org/apache/hadoop/yarn/webapp/WebApps.java  |  42 ++++
 .../src/main/resources/yarn-default.xml         |  78 +++++++
 .../ApplicationHistoryServer.java               |  19 +-
 .../server/nodemanager/webapp/WebServer.java    |   1 +
 .../server/resourcemanager/ResourceManager.java |   1 +
 .../yarn/webapp/TestRMWithCSRFFilter.java       | 231 +++++++++++++++++++
 10 files changed, 429 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e51a8c10/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java
index 1f2088a..5aa4671 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java
@@ -225,6 +225,19 @@ public class JHAdminConfig {
       + "jobname.limit";
   public static final int DEFAULT_MR_HS_JOBNAME_LIMIT = 50;
 
+
+  /**
+   * CSRF settings.
+   */
+  public static final String MR_HISTORY_CSRF_PREFIX = MR_HISTORY_PREFIX +
+                                                      "webapp.rest-csrf.";
+  public static final String MR_HISTORY_CSRF_ENABLED = MR_HISTORY_CSRF_PREFIX +
+                                                       "enabled";
+  public static final String MR_HISTORY_CSRF_CUSTOM_HEADER =
+      MR_HISTORY_CSRF_PREFIX + "custom-header";
+  public static final String MR_HISTORY_METHODS_TO_IGNORE =
+      MR_HISTORY_CSRF_PREFIX + "methods-to-ignore";
+
   /**
    * Settings for .jhist file format.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e51a8c10/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index da25a99..b7bdcc8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -1862,4 +1862,30 @@
   default is -1</description>
 </property>
 
+<property>
+  <description>
+    Enable the CSRF filter for the job history web app
+  </description>
+  <name>mapreduce.jobhistory.webapp.rest-csrf.enabled</name>
+  <value>false</value>
+</property>
+
+<property>
+  <description>
+    Optional parameter that indicates the custom header name to use for CSRF
+    protection.
+  </description>
+  <name>mapreduce.jobhistory.webapp.rest-csrf.custom-header</name>
+  <value>X-XSRF-Header</value>
+</property>
+
+<property>
+  <description>
+    Optional parameter that indicates the list of HTTP methods that do not
+    require CSRF protection
+  </description>
+  <name>mapreduce.jobhistory.webapp.rest-csrf.methods-to-ignore</name>
+  <value>GET,OPTIONS,HEAD</value>
+</property>
+
 </configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e51a8c10/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java
index 3751ad9..2fbaade 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java
@@ -160,6 +160,7 @@ public class HistoryClientService extends AbstractService {
             JHAdminConfig.MR_WEBAPP_SPNEGO_KEYTAB_FILE_KEY)
         .withHttpSpnegoPrincipalKey(
             JHAdminConfig.MR_WEBAPP_SPNEGO_USER_NAME_KEY)
+        .withCSRFProtection(JHAdminConfig.MR_HISTORY_CSRF_PREFIX)
         .at(NetUtils.getHostPortString(bindAddress)).start(webApp);
     
     String connectHost = MRWebAppUtil.getJHSWebappURLWithoutScheme(conf).split(":")[0];

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e51a8c10/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index cef6932..61d1d72 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2399,6 +2399,30 @@ public class YarnConfiguration extends Configuration {
   public static final String NM_SCRIPT_BASED_NODE_LABELS_PROVIDER_SCRIPT_OPTS =
       NM_SCRIPT_BASED_NODE_LABELS_PROVIDER_PREFIX + "opts";
 
+  // RM and NM CSRF props
+  public static final String REST_CSRF = "webapp.rest-csrf.";
+  public static final String RM_CSRF_PREFIX = RM_PREFIX + REST_CSRF;
+  public static final String NM_CSRF_PREFIX = NM_PREFIX + REST_CSRF;
+  public static final String TIMELINE_CSRF_PREFIX = TIMELINE_SERVICE_PREFIX +
+                                                    REST_CSRF;
+  public static final String RM_CSRF_ENABLED = RM_CSRF_PREFIX + "enabled";
+  public static final String NM_CSRF_ENABLED = NM_CSRF_PREFIX + "enabled";
+  public static final String TIMELINE_CSRF_ENABLED = TIMELINE_CSRF_PREFIX +
+                                                     "enabled";
+  public static final String RM_CSRF_CUSTOM_HEADER = RM_CSRF_PREFIX +
+                                                     "custom-header";
+  public static final String NM_CSRF_CUSTOM_HEADER = NM_CSRF_PREFIX +
+                                                     "custom-header";
+  public static final String TIMELINE_CSRF_CUSTOM_HEADER =
+      TIMELINE_CSRF_PREFIX + "custom-header";
+  public static final String RM_CSRF_METHODS_TO_IGNORE = RM_CSRF_PREFIX +
+                                                     "methods-to-ignore";
+  public static final String NM_CSRF_METHODS_TO_IGNORE = NM_CSRF_PREFIX +
+                                                         "methods-to-ignore";
+  public static final String TIMELINE_CSRF_METHODS_TO_IGNORE =
+      TIMELINE_CSRF_PREFIX + "methods-to-ignore";
+
+
   public YarnConfiguration() {
     super();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e51a8c10/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
index 0c6edad..6144a0d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.http.HttpConfig.Policy;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.security.http.RestCsrfPreventionFilter;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 import org.slf4j.Logger;
@@ -73,6 +74,7 @@ import com.google.inject.servlet.GuiceFilter;
 public class WebApps {
   static final Logger LOG = LoggerFactory.getLogger(WebApps.class);
   public static class Builder<T> {
+
     static class ServletStruct {
       public Class<? extends HttpServlet> clazz;
       public String name;
@@ -91,6 +93,7 @@ public class WebApps {
     boolean devMode = false;
     private String spnegoPrincipalKey;
     private String spnegoKeytabKey;
+    private String configPrefix;
     private final HashSet<ServletStruct> servlets = new HashSet<ServletStruct>();
     private final HashMap<String, Object> attributes = new HashMap<String, Object>();
 
@@ -161,6 +164,18 @@ public class WebApps {
       return this;
     }
 
+    /**
+     * Enable the CSRF filter.
+     * @param csrfConfigPrefix The config prefix that identifies the
+     *                         CSRF parameters applicable for this filter
+     *                         instance.
+     * @return the Builder instance
+     */
+    public Builder<T> withCSRFProtection(String csrfConfigPrefix) {
+      this.configPrefix = csrfConfigPrefix;
+      return this;
+    }
+
     public Builder<T> inDevMode() {
       devMode = true;
       return this;
@@ -266,6 +281,19 @@ public class WebApps {
         for(Map.Entry<String, Object> entry : attributes.entrySet()) {
           server.setAttribute(entry.getKey(), entry.getValue());
         }
+        Map<String, String> params = getCsrfConfigParameters();
+
+        if (hasCSRFEnabled(params)) {
+          LOG.info("CSRF Protection has been enabled for the {} application. "
+                   + "Please ensure that there is an authentication mechanism "
+                   + "enabled (kerberos, custom, etc).",
+                   name);
+          String restCsrfClassName = RestCsrfPreventionFilter.class.getName();
+          HttpServer2.defineFilter(server.getWebAppContext(), restCsrfClassName,
+                                   restCsrfClassName, params,
+                                   new String[] {"/*"});
+        }
+
         HttpServer2.defineFilter(server.getWebAppContext(), "guice",
           GuiceFilter.class.getName(), null, new String[] { "/*" });
 
@@ -295,6 +323,20 @@ public class WebApps {
       return webapp;
     }
 
+    private boolean hasCSRFEnabled(Map<String, String> params) {
+      return params != null && Boolean.valueOf(params.get("enabled"));
+    }
+
+    private Map<String, String> getCsrfConfigParameters() {
+      Map<String, String> params = null;
+      if (configPrefix != null) {
+        // need to obtain parameters for CSRF filter
+        params =
+            RestCsrfPreventionFilter.getFilterParams(conf, configPrefix);
+      }
+      return params;
+    }
+
     public WebApp start() {
       return start(null);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e51a8c10/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index cc08802..ea1afe4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2637,4 +2637,82 @@
     <name>yarn.node-labels.fs-store.impl.class</name>
     <value>org.apache.hadoop.yarn.nodelabels.FileSystemNodeLabelsStore</value>
   </property>
+
+  <property>
+    <description>
+      Enable the CSRF filter for the RM web app
+    </description>
+    <name>yarn.resourcemanager.webapp.rest-csrf.enabled</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <description>
+      Optional parameter that indicates the custom header name to use for CSRF
+      protection.
+    </description>
+    <name>yarn.resourcemanager.webapp.rest-csrf.custom-header</name>
+    <value>X-XSRF-Header</value>
+  </property>
+
+  <property>
+    <description>
+      Optional parameter that indicates the list of HTTP methods that do not
+      require CSRF protection
+    </description>
+    <name>yarn.resourcemanager.webapp.rest-csrf.methods-to-ignore</name>
+    <value>GET,OPTIONS,HEAD</value>
+  </property>
+
+  <property>
+    <description>
+      Enable the CSRF filter for the NM web app
+    </description>
+    <name>yarn.nodemanager.webapp.rest-csrf.enabled</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <description>
+      Optional parameter that indicates the custom header name to use for CSRF
+      protection.
+    </description>
+    <name>yarn.nodemanager.webapp.rest-csrf.custom-header</name>
+    <value>X-XSRF-Header</value>
+  </property>
+
+  <property>
+    <description>
+      Optional parameter that indicates the list of HTTP methods that do not
+      require CSRF protection
+    </description>
+    <name>yarn.nodemanager.webapp.rest-csrf.methods-to-ignore</name>
+    <value>GET,OPTIONS,HEAD</value>
+  </property>
+
+  <property>
+    <description>
+      Enable the CSRF filter for the timeline service web app
+    </description>
+    <name>yarn.timeline-service.webapp.rest-csrf.enabled</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <description>
+      Optional parameter that indicates the custom header name to use for CSRF
+      protection.
+    </description>
+    <name>yarn.timeline-service.webapp.rest-csrf.custom-header</name>
+    <value>X-XSRF-Header</value>
+  </property>
+
+  <property>
+    <description>
+      Optional parameter that indicates the list of HTTP methods that do not
+      require CSRF protection
+    </description>
+    <name>yarn.timeline-service.webapp.rest-csrf.methods-to-ignore</name>
+    <value>GET,OPTIONS,HEAD</value>
+  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e51a8c10/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
index f4fe140..cedbd2e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
@@ -297,16 +297,21 @@ public class ApplicationHistoryServer extends CompositeService {
                           YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
                           WebAppUtils.getAHSWebAppURLWithoutScheme(conf));
     try {
-      AHSWebApp ahsWebApp = new AHSWebApp(timelineDataManager, ahsClientService);
+      AHSWebApp ahsWebApp =
+          new AHSWebApp(timelineDataManager, ahsClientService);
       webApp =
           WebApps
             .$for("applicationhistory", ApplicationHistoryClientService.class,
                 ahsClientService, "ws")
-             .with(conf).withAttribute(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
-                 conf.get(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS)).at(bindAddress).build(ahsWebApp);
+             .with(conf)
+              .withAttribute(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
+                 conf.get(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS))
+              .withCSRFProtection(YarnConfiguration.TIMELINE_CSRF_PREFIX)
+              .at(bindAddress).build(ahsWebApp);
        HttpServer2 httpServer = webApp.httpServer();
 
-       String[] names = conf.getTrimmedStrings(YarnConfiguration.TIMELINE_SERVICE_UI_NAMES);
+       String[] names = conf.getTrimmedStrings(
+           YarnConfiguration.TIMELINE_SERVICE_UI_NAMES);
        WebAppContext webAppContext = httpServer.getWebAppContext();
 
        for (String name : names) {
@@ -332,9 +337,9 @@ public class ApplicationHistoryServer extends CompositeService {
        }
        httpServer.start();
        conf.updateConnectAddr(YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
-         YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
-         YarnConfiguration.DEFAULT_TIMELINE_SERVICE_WEBAPP_ADDRESS,
-         this.getListenerAddress());
+        YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
+        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_WEBAPP_ADDRESS,
+        this.getListenerAddress());
        LOG.info("Instantiating AHSWebApp at " + getPort());
     } catch (Exception e) {
       String msg = "AHSWebApp failed to start.";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e51a8c10/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java
index 319c10c..827e1b5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java
@@ -79,6 +79,7 @@ public class WebServer extends AbstractService {
               YarnConfiguration.NM_WEBAPP_SPNEGO_USER_NAME_KEY)
             .withHttpSpnegoKeytabKey(
               YarnConfiguration.NM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY)
+            .withCSRFProtection(YarnConfiguration.NM_CSRF_PREFIX)
             .start(this.nmWebApp);
       this.port = this.webApp.httpServer().getConnectorAddress(0).getPort();
     } catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e51a8c10/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index 80b33a3..2744bb4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -1058,6 +1058,7 @@ public class ResourceManager extends CompositeService implements Recoverable {
                 YarnConfiguration.RM_WEBAPP_SPNEGO_USER_NAME_KEY)
             .withHttpSpnegoKeytabKey(
                 YarnConfiguration.RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY)
+            .withCSRFProtection(YarnConfiguration.RM_CSRF_PREFIX)
             .at(webAppAddress);
     String proxyHostAndPort = WebAppUtils.getProxyHostAndPort(conf);
     if(WebAppUtils.getResolvedRMWebAppURLWithoutScheme(conf).

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e51a8c10/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/webapp/TestRMWithCSRFFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/webapp/TestRMWithCSRFFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/webapp/TestRMWithCSRFFilter.java
new file mode 100644
index 0000000..2efbd2d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/webapp/TestRMWithCSRFFilter.java
@@ -0,0 +1,231 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.webapp;
+
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.servlet.GuiceServletContextListener;
+import com.google.inject.servlet.ServletModule;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.ClientResponse.Status;
+import com.sun.jersey.api.client.UniformInterfaceException;
+import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
+import com.sun.jersey.test.framework.WebAppDescriptor;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.http.RestCsrfPreventionFilter;
+import org.apache.hadoop.service.Service.STATE;
+import org.apache.hadoop.util.VersionInfo;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.ClusterMetrics;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices;
+import org.apache.hadoop.yarn.util.YarnVersionInfo;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.NodeList;
+import org.xml.sax.InputSource;
+
+import javax.ws.rs.core.MediaType;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import java.io.StringReader;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+/**
+ * Used TestRMWebServices as an example of web invocations of RM and added
+ * test for CSRF Filter.
+ */
+public class TestRMWithCSRFFilter extends JerseyTestBase {
+
+  private static MockRM rm;
+
+  private Injector injector = Guice.createInjector(new ServletModule() {
+    @Override
+    protected void configureServlets() {
+      bind(JAXBContextResolver.class);
+      bind(RMWebServices.class);
+      bind(GenericExceptionHandler.class);
+      Configuration conf = new Configuration();
+      conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class,
+          ResourceScheduler.class);
+      rm = new MockRM(conf);
+      bind(ResourceManager.class).toInstance(rm);
+      serve("/*").with(GuiceContainer.class);
+      RestCsrfPreventionFilter csrfFilter = new RestCsrfPreventionFilter();
+      Map<String,String> initParams = new HashMap<>();
+      // adding GET as protected method to make things a little easier...
+      initParams.put(RestCsrfPreventionFilter.CUSTOM_METHODS_TO_IGNORE_PARAM,
+                     "OPTIONS,HEAD,TRACE");
+      filter("/*").through(csrfFilter, initParams);
+    }
+  });
+
+  public class GuiceServletConfig extends GuiceServletContextListener {
+
+    @Override
+    protected Injector getInjector() {
+      return injector;
+    }
+  }
+
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+  }
+
+  public TestRMWithCSRFFilter() {
+    super(new WebAppDescriptor.Builder(
+        "org.apache.hadoop.yarn.server.resourcemanager.webapp")
+              .contextListenerClass(GuiceServletConfig.class)
+              .filterClass(com.google.inject.servlet.GuiceFilter.class)
+              .contextPath("jersey-guice-filter").servletPath("/").build());
+  }
+
+  @Test
+  public void testNoCustomHeaderFromBrowser() throws Exception {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("cluster")
+        .path("info").accept("application/xml")
+        .header(RestCsrfPreventionFilter.HEADER_USER_AGENT,"Mozilla/5.0")
+        .get(ClientResponse.class);
+    assertTrue("Should have been rejected", response.getStatus() ==
+                                            Status.BAD_REQUEST.getStatusCode());
+  }
+
+  @Test
+  public void testIncludeCustomHeaderFromBrowser() throws Exception {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("cluster")
+        .path("info").accept("application/xml")
+        .header(RestCsrfPreventionFilter.HEADER_USER_AGENT,"Mozilla/5.0")
+        .header("X-XSRF-HEADER", "")
+        .get(ClientResponse.class);
+    assertTrue("Should have been accepted", response.getStatus() ==
+                                            Status.OK.getStatusCode());
+    assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
+    String xml = response.getEntity(String.class);
+    verifyClusterInfoXML(xml);
+  }
+
+  @Test
+  public void testAllowedMethod() throws Exception {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("cluster")
+        .path("info").accept("application/xml")
+        .header(RestCsrfPreventionFilter.HEADER_USER_AGENT,"Mozilla/5.0")
+        .head();
+    assertTrue("Should have been allowed", response.getStatus() ==
+                                           Status.OK.getStatusCode());
+  }
+
+  @Test
+  public void testAllowNonBrowserInteractionWithoutHeader() throws Exception {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("cluster")
+        .path("info").accept("application/xml")
+        .get(ClientResponse.class);
+    assertTrue("Should have been accepted", response.getStatus() ==
+                                            Status.OK.getStatusCode());
+    assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
+    String xml = response.getEntity(String.class);
+    verifyClusterInfoXML(xml);
+  }
+
+  public void verifyClusterInfoXML(String xml) throws Exception {
+    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
+    DocumentBuilder db = dbf.newDocumentBuilder();
+    InputSource is = new InputSource();
+    is.setCharacterStream(new StringReader(xml));
+    Document dom = db.parse(is);
+    NodeList nodes = dom.getElementsByTagName("clusterInfo");
+    assertEquals("incorrect number of elements", 1, nodes.getLength());
+
+    for (int i = 0; i < nodes.getLength(); i++) {
+      Element element = (Element) nodes.item(i);
+
+      verifyClusterGeneric(WebServicesTestUtils.getXmlLong(element, "id"),
+         WebServicesTestUtils.getXmlLong(element, "startedOn"),
+         WebServicesTestUtils.getXmlString(element, "state"),
+         WebServicesTestUtils.getXmlString(element, "haState"),
+         WebServicesTestUtils.getXmlString(
+             element, "haZooKeeperConnectionState"),
+         WebServicesTestUtils.getXmlString(element, "hadoopVersionBuiltOn"),
+         WebServicesTestUtils.getXmlString(element, "hadoopBuildVersion"),
+         WebServicesTestUtils.getXmlString(element, "hadoopVersion"),
+         WebServicesTestUtils.getXmlString(element,
+                                           "resourceManagerVersionBuiltOn"),
+         WebServicesTestUtils.getXmlString(element,
+                                           "resourceManagerBuildVersion"),
+         WebServicesTestUtils.getXmlString(element, "resourceManagerVersion"));
+    }
+  }
+
+  public void verifyClusterGeneric(long clusterid, long startedon,
+                                   String state, String haState,
+                                   String haZooKeeperConnectionState,
+                                   String hadoopVersionBuiltOn,
+                                   String hadoopBuildVersion,
+                                   String hadoopVersion,
+                                   String resourceManagerVersionBuiltOn,
+                                   String resourceManagerBuildVersion,
+                                   String resourceManagerVersion) {
+
+    assertEquals("clusterId doesn't match: ",
+                 ResourceManager.getClusterTimeStamp(), clusterid);
+    assertEquals("startedOn doesn't match: ",
+                 ResourceManager.getClusterTimeStamp(), startedon);
+    assertTrue("stated doesn't match: " + state,
+               state.matches(STATE.INITED.toString()));
+    assertTrue("HA state doesn't match: " + haState,
+               haState.matches("INITIALIZING"));
+
+    WebServicesTestUtils.checkStringMatch("hadoopVersionBuiltOn",
+                                          VersionInfo.getDate(), hadoopVersionBuiltOn);
+    WebServicesTestUtils.checkStringEqual("hadoopBuildVersion",
+                                          VersionInfo.getBuildVersion(), hadoopBuildVersion);
+    WebServicesTestUtils.checkStringMatch("hadoopVersion",
+                                          VersionInfo.getVersion(), hadoopVersion);
+
+    WebServicesTestUtils.checkStringMatch("resourceManagerVersionBuiltOn",
+                                          YarnVersionInfo.getDate(),
+                                          resourceManagerVersionBuiltOn);
+    WebServicesTestUtils.checkStringEqual("resourceManagerBuildVersion",
+                                          YarnVersionInfo.getBuildVersion(), resourceManagerBuildVersion);
+    WebServicesTestUtils.checkStringMatch("resourceManagerVersion",
+                                          YarnVersionInfo.getVersion(),
+                                          resourceManagerVersion);
+  }
+
+}


[09/34] hadoop git commit: YARN-4760. proxy redirect to history server uses wrong URL. Contributed by Eric Badger

Posted by ar...@apache.org.
YARN-4760. proxy redirect to history server uses wrong URL. Contributed by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4163e36c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4163e36c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4163e36c

Branch: refs/heads/HDFS-1312
Commit: 4163e36c2be2f562545aba93c1d47643a9ff4741
Parents: 059caf9
Author: Jason Lowe <jl...@apache.org>
Authored: Mon Mar 7 15:56:33 2016 +0000
Committer: Jason Lowe <jl...@apache.org>
Committed: Mon Mar 7 15:56:33 2016 +0000

----------------------------------------------------------------------
 .../org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java | 2 +-
 .../apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4163e36c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
index 0e988b8..ab4b295 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
@@ -130,7 +130,7 @@ public class WebAppProxyServlet extends HttpServlet {
         WebAppUtils.getResolvedRMWebAppURLWithScheme(conf), "cluster", "app");
     this.ahsAppPageUrlBase = StringHelper.pjoin(
         WebAppUtils.getHttpSchemePrefix(conf) + WebAppUtils
-        .getAHSWebAppURLWithoutScheme(conf), "applicationhistory", "apps");
+        .getAHSWebAppURLWithoutScheme(conf), "applicationhistory", "app");
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4163e36c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java
index 68d1258..330e4de 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java
@@ -275,7 +275,7 @@ public class TestWebAppProxyServlet {
     }
     String appAddressInAhs = WebAppUtils.getHttpSchemePrefix(configuration) +
         WebAppUtils.getAHSWebAppURLWithoutScheme(configuration) +
-        "/applicationhistory" + "/apps/" + app.toString();
+        "/applicationhistory" + "/app/" + app.toString();
     assertTrue("Webapp proxy servlet should have redirected to AHS",
         proxyConn.getURL().toString().equals(appAddressInAhs));
     }


[12/34] hadoop git commit: HDFS-9906. Remove spammy log spew when a datanode is restarted. (Contributed by Brahma Reddy Battula)

Posted by ar...@apache.org.
HDFS-9906. Remove spammy log spew when a datanode is restarted. (Contributed by Brahma Reddy Battula)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/724d2299
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/724d2299
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/724d2299

Branch: refs/heads/HDFS-1312
Commit: 724d2299cd2516d90c030f6e20d814cceb439228
Parents: b266176
Author: Arpit Agarwal <ar...@apache.org>
Authored: Mon Mar 7 12:19:53 2016 -0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Mon Mar 7 12:19:53 2016 -0800

----------------------------------------------------------------------
 .../apache/hadoop/hdfs/server/blockmanagement/BlockManager.java    | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/724d2299/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 5175c13..4123654 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2850,7 +2850,7 @@ public class BlockManager implements BlockStatsMXBean {
       corruptReplicas.removeFromCorruptReplicasMap(block, node,
           Reason.GENSTAMP_MISMATCH);
       curReplicaDelta = 0;
-      blockLog.warn("BLOCK* addStoredBlock: Redundant addStoredBlock request"
+      blockLog.debug("BLOCK* addStoredBlock: Redundant addStoredBlock request"
               + " received for {} on node {} size {}", storedBlock, node,
           storedBlock.getNumBytes());
     }


[28/34] hadoop git commit: HADOOP-12798. Update changelog and release notes (2016-03-04) (aw)

Posted by ar...@apache.org.
HADOOP-12798. Update changelog and release notes (2016-03-04) (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/55f73a1c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/55f73a1c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/55f73a1c

Branch: refs/heads/HDFS-1312
Commit: 55f73a1cbd45a62774e471b7a8a1e0cc40eecdd4
Parents: 6f9d2f6
Author: Allen Wittenauer <aw...@apache.org>
Authored: Fri Mar 4 12:23:52 2016 -0800
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Tue Mar 8 16:48:38 2016 -0800

----------------------------------------------------------------------
 .../markdown/release/0.1.0/CHANGES.0.1.0.md     |   6 +
 .../release/0.1.0/RELEASENOTES.0.1.0.md         |   2 +-
 .../markdown/release/0.1.1/CHANGES.0.1.1.md     |   6 +
 .../release/0.1.1/RELEASENOTES.0.1.1.md         |   2 +-
 .../markdown/release/0.10.0/CHANGES.0.10.0.md   |   6 +
 .../release/0.10.0/RELEASENOTES.0.10.0.md       |   2 +-
 .../markdown/release/0.10.1/CHANGES.0.10.1.md   |   6 +
 .../release/0.10.1/RELEASENOTES.0.10.1.md       |   2 +-
 .../markdown/release/0.11.0/CHANGES.0.11.0.md   |   6 +
 .../release/0.11.0/RELEASENOTES.0.11.0.md       |   2 +-
 .../markdown/release/0.11.1/CHANGES.0.11.1.md   |   6 +
 .../release/0.11.1/RELEASENOTES.0.11.1.md       |   2 +-
 .../markdown/release/0.11.2/CHANGES.0.11.2.md   |   6 +
 .../release/0.11.2/RELEASENOTES.0.11.2.md       |   2 +-
 .../markdown/release/0.12.0/CHANGES.0.12.0.md   |   6 +
 .../release/0.12.0/RELEASENOTES.0.12.0.md       |   2 +-
 .../markdown/release/0.12.1/CHANGES.0.12.1.md   |   6 +
 .../release/0.12.1/RELEASENOTES.0.12.1.md       |   2 +-
 .../markdown/release/0.12.2/CHANGES.0.12.2.md   |   6 +
 .../release/0.12.2/RELEASENOTES.0.12.2.md       |   2 +-
 .../markdown/release/0.12.3/CHANGES.0.12.3.md   |   6 +
 .../release/0.12.3/RELEASENOTES.0.12.3.md       |   2 +-
 .../markdown/release/0.13.0/CHANGES.0.13.0.md   |   6 +
 .../release/0.13.0/RELEASENOTES.0.13.0.md       |   2 +-
 .../markdown/release/0.14.0/CHANGES.0.14.0.md   |   6 +
 .../release/0.14.0/RELEASENOTES.0.14.0.md       |   2 +-
 .../markdown/release/0.14.1/CHANGES.0.14.1.md   |   6 +
 .../release/0.14.1/RELEASENOTES.0.14.1.md       |   2 +-
 .../markdown/release/0.14.2/CHANGES.0.14.2.md   |   6 +
 .../release/0.14.2/RELEASENOTES.0.14.2.md       |   2 +-
 .../markdown/release/0.14.3/CHANGES.0.14.3.md   |   6 +
 .../release/0.14.3/RELEASENOTES.0.14.3.md       |   2 +-
 .../markdown/release/0.14.4/CHANGES.0.14.4.md   |   6 +
 .../release/0.14.4/RELEASENOTES.0.14.4.md       |   2 +-
 .../markdown/release/0.15.0/CHANGES.0.15.0.md   |   6 +
 .../release/0.15.0/RELEASENOTES.0.15.0.md       |   2 +-
 .../markdown/release/0.15.1/CHANGES.0.15.1.md   |   6 +
 .../release/0.15.1/RELEASENOTES.0.15.1.md       |   2 +-
 .../markdown/release/0.15.2/CHANGES.0.15.2.md   |   6 +
 .../release/0.15.2/RELEASENOTES.0.15.2.md       |   2 +-
 .../markdown/release/0.15.3/CHANGES.0.15.3.md   |   6 +
 .../release/0.15.3/RELEASENOTES.0.15.3.md       |   2 +-
 .../markdown/release/0.15.4/CHANGES.0.15.4.md   |   8 +-
 .../release/0.15.4/RELEASENOTES.0.15.4.md       |   2 +-
 .../markdown/release/0.16.0/CHANGES.0.16.0.md   |   6 +
 .../release/0.16.0/RELEASENOTES.0.16.0.md       |   2 +-
 .../markdown/release/0.16.1/CHANGES.0.16.1.md   |   6 +
 .../release/0.16.1/RELEASENOTES.0.16.1.md       |   2 +-
 .../markdown/release/0.16.2/CHANGES.0.16.2.md   |   6 +
 .../release/0.16.2/RELEASENOTES.0.16.2.md       |   2 +-
 .../markdown/release/0.16.3/CHANGES.0.16.3.md   |   6 +
 .../release/0.16.3/RELEASENOTES.0.16.3.md       |   2 +-
 .../markdown/release/0.16.4/CHANGES.0.16.4.md   |   6 +
 .../release/0.16.4/RELEASENOTES.0.16.4.md       |   2 +-
 .../markdown/release/0.17.0/CHANGES.0.17.0.md   |   6 +
 .../release/0.17.0/RELEASENOTES.0.17.0.md       |   4 +-
 .../markdown/release/0.17.1/CHANGES.0.17.1.md   |   6 +
 .../release/0.17.1/RELEASENOTES.0.17.1.md       |   2 +-
 .../markdown/release/0.17.2/CHANGES.0.17.2.md   |   6 +
 .../release/0.17.2/RELEASENOTES.0.17.2.md       |   2 +-
 .../markdown/release/0.17.3/CHANGES.0.17.3.md   |   8 +-
 .../release/0.17.3/RELEASENOTES.0.17.3.md       |   2 +-
 .../markdown/release/0.18.0/CHANGES.0.18.0.md   |   6 +
 .../release/0.18.0/RELEASENOTES.0.18.0.md       |   2 +-
 .../markdown/release/0.18.1/CHANGES.0.18.1.md   |   6 +
 .../release/0.18.1/RELEASENOTES.0.18.1.md       |   2 +-
 .../markdown/release/0.18.2/CHANGES.0.18.2.md   |   6 +
 .../release/0.18.2/RELEASENOTES.0.18.2.md       |   2 +-
 .../markdown/release/0.18.3/CHANGES.0.18.3.md   |   6 +
 .../release/0.18.3/RELEASENOTES.0.18.3.md       |   2 +-
 .../markdown/release/0.18.4/CHANGES.0.18.4.md   |   8 +-
 .../release/0.18.4/RELEASENOTES.0.18.4.md       |   2 +-
 .../markdown/release/0.19.0/CHANGES.0.19.0.md   |   6 +
 .../release/0.19.0/RELEASENOTES.0.19.0.md       |   6 +-
 .../markdown/release/0.19.1/CHANGES.0.19.1.md   |   6 +
 .../release/0.19.1/RELEASENOTES.0.19.1.md       |   2 +-
 .../markdown/release/0.19.2/CHANGES.0.19.2.md   |   6 +
 .../release/0.19.2/RELEASENOTES.0.19.2.md       |   2 +-
 .../markdown/release/0.2.0/CHANGES.0.2.0.md     |   6 +
 .../release/0.2.0/RELEASENOTES.0.2.0.md         |   2 +-
 .../markdown/release/0.2.1/CHANGES.0.2.1.md     |   6 +
 .../release/0.2.1/RELEASENOTES.0.2.1.md         |   2 +-
 .../markdown/release/0.20.0/CHANGES.0.20.0.md   |   6 +
 .../release/0.20.0/RELEASENOTES.0.20.0.md       |   4 +-
 .../markdown/release/0.20.1/CHANGES.0.20.1.md   |   6 +
 .../release/0.20.1/RELEASENOTES.0.20.1.md       |   6 +-
 .../markdown/release/0.20.2/CHANGES.0.20.2.md   |   6 +
 .../release/0.20.2/RELEASENOTES.0.20.2.md       |   2 +-
 .../release/0.20.203.0/CHANGES.0.20.203.0.md    |   6 +
 .../0.20.203.0/RELEASENOTES.0.20.203.0.md       |  10 +-
 .../release/0.20.203.1/CHANGES.0.20.203.1.md    |   8 +-
 .../0.20.203.1/RELEASENOTES.0.20.203.1.md       |   2 +-
 .../release/0.20.204.0/CHANGES.0.20.204.0.md    |   6 +
 .../0.20.204.0/RELEASENOTES.0.20.204.0.md       |   2 +-
 .../release/0.20.205.0/CHANGES.0.20.205.0.md    |   6 +
 .../0.20.205.0/RELEASENOTES.0.20.205.0.md       |   2 +-
 .../markdown/release/0.20.3/CHANGES.0.20.3.md   |   8 +-
 .../release/0.20.3/RELEASENOTES.0.20.3.md       |   2 +-
 .../markdown/release/0.21.0/CHANGES.0.21.0.md   |   6 +
 .../release/0.21.0/RELEASENOTES.0.21.0.md       |  10 +-
 .../markdown/release/0.21.1/CHANGES.0.21.1.md   |   8 +-
 .../release/0.21.1/RELEASENOTES.0.21.1.md       |   6 +-
 .../markdown/release/0.22.0/CHANGES.0.22.0.md   |   6 +
 .../release/0.22.0/RELEASENOTES.0.22.0.md       |   7 +-
 .../markdown/release/0.22.1/CHANGES.0.22.1.md   |   8 +-
 .../release/0.22.1/RELEASENOTES.0.22.1.md       |   4 +-
 .../markdown/release/0.23.0/CHANGES.0.23.0.md   |   6 +
 .../release/0.23.0/RELEASENOTES.0.23.0.md       |  18 +-
 .../markdown/release/0.23.1/CHANGES.0.23.1.md   |   6 +
 .../release/0.23.1/RELEASENOTES.0.23.1.md       |   6 +-
 .../markdown/release/0.23.10/CHANGES.0.23.10.md |   6 +
 .../release/0.23.10/RELEASENOTES.0.23.10.md     |   2 +-
 .../markdown/release/0.23.11/CHANGES.0.23.11.md |   6 +
 .../release/0.23.11/RELEASENOTES.0.23.11.md     |   2 +-
 .../markdown/release/0.23.2/CHANGES.0.23.2.md   |   8 +-
 .../release/0.23.2/RELEASENOTES.0.23.2.md       |   4 +-
 .../markdown/release/0.23.3/CHANGES.0.23.3.md   |   6 +
 .../release/0.23.3/RELEASENOTES.0.23.3.md       |   2 +-
 .../markdown/release/0.23.4/CHANGES.0.23.4.md   |   6 +
 .../release/0.23.4/RELEASENOTES.0.23.4.md       |   2 +-
 .../markdown/release/0.23.5/CHANGES.0.23.5.md   |   6 +
 .../release/0.23.5/RELEASENOTES.0.23.5.md       |   2 +-
 .../markdown/release/0.23.6/CHANGES.0.23.6.md   |   6 +
 .../release/0.23.6/RELEASENOTES.0.23.6.md       |   2 +-
 .../markdown/release/0.23.7/CHANGES.0.23.7.md   |   6 +
 .../release/0.23.7/RELEASENOTES.0.23.7.md       |   2 +-
 .../markdown/release/0.23.8/CHANGES.0.23.8.md   |   6 +
 .../release/0.23.8/RELEASENOTES.0.23.8.md       |   2 +-
 .../markdown/release/0.23.9/CHANGES.0.23.9.md   |   6 +
 .../release/0.23.9/RELEASENOTES.0.23.9.md       |   2 +-
 .../markdown/release/0.24.0/CHANGES.0.24.0.md   |   8 +-
 .../release/0.24.0/RELEASENOTES.0.24.0.md       |   2 +-
 .../markdown/release/0.3.0/CHANGES.0.3.0.md     |   6 +
 .../release/0.3.0/RELEASENOTES.0.3.0.md         |   2 +-
 .../markdown/release/0.3.1/CHANGES.0.3.1.md     |   6 +
 .../release/0.3.1/RELEASENOTES.0.3.1.md         |   2 +-
 .../markdown/release/0.3.2/CHANGES.0.3.2.md     |   6 +
 .../release/0.3.2/RELEASENOTES.0.3.2.md         |   2 +-
 .../markdown/release/0.4.0/CHANGES.0.4.0.md     |   6 +
 .../release/0.4.0/RELEASENOTES.0.4.0.md         |   2 +-
 .../markdown/release/0.5.0/CHANGES.0.5.0.md     |   6 +
 .../release/0.5.0/RELEASENOTES.0.5.0.md         |   2 +-
 .../markdown/release/0.6.0/CHANGES.0.6.0.md     |   6 +
 .../release/0.6.0/RELEASENOTES.0.6.0.md         |   2 +-
 .../markdown/release/0.6.1/CHANGES.0.6.1.md     |   6 +
 .../release/0.6.1/RELEASENOTES.0.6.1.md         |   2 +-
 .../markdown/release/0.6.2/CHANGES.0.6.2.md     |   6 +
 .../release/0.6.2/RELEASENOTES.0.6.2.md         |   2 +-
 .../markdown/release/0.7.0/CHANGES.0.7.0.md     |   6 +
 .../release/0.7.0/RELEASENOTES.0.7.0.md         |   2 +-
 .../markdown/release/0.7.1/CHANGES.0.7.1.md     |   6 +
 .../release/0.7.1/RELEASENOTES.0.7.1.md         |   2 +-
 .../markdown/release/0.7.2/CHANGES.0.7.2.md     |   6 +
 .../release/0.7.2/RELEASENOTES.0.7.2.md         |   2 +-
 .../markdown/release/0.8.0/CHANGES.0.8.0.md     |   6 +
 .../release/0.8.0/RELEASENOTES.0.8.0.md         |   2 +-
 .../markdown/release/0.9.0/CHANGES.0.9.0.md     |   6 +
 .../release/0.9.0/RELEASENOTES.0.9.0.md         |   2 +-
 .../markdown/release/0.9.1/CHANGES.0.9.1.md     |   6 +
 .../release/0.9.1/RELEASENOTES.0.9.1.md         |   2 +-
 .../markdown/release/0.9.2/CHANGES.0.9.2.md     |   6 +
 .../release/0.9.2/RELEASENOTES.0.9.2.md         |   2 +-
 .../markdown/release/1.0.0/CHANGES.1.0.0.md     |   6 +
 .../release/1.0.0/RELEASENOTES.1.0.0.md         |   2 +-
 .../markdown/release/1.0.1/CHANGES.1.0.1.md     |   6 +
 .../release/1.0.1/RELEASENOTES.1.0.1.md         |   2 +-
 .../markdown/release/1.0.2/CHANGES.1.0.2.md     |   6 +
 .../release/1.0.2/RELEASENOTES.1.0.2.md         |   2 +-
 .../markdown/release/1.0.3/CHANGES.1.0.3.md     |   6 +
 .../release/1.0.3/RELEASENOTES.1.0.3.md         |   2 +-
 .../markdown/release/1.0.4/CHANGES.1.0.4.md     |   6 +
 .../release/1.0.4/RELEASENOTES.1.0.4.md         |   2 +-
 .../markdown/release/1.1.0/CHANGES.1.1.0.md     |   6 +
 .../release/1.1.0/RELEASENOTES.1.1.0.md         |   8 +-
 .../markdown/release/1.1.1/CHANGES.1.1.1.md     |   6 +
 .../release/1.1.1/RELEASENOTES.1.1.1.md         |   2 +-
 .../markdown/release/1.1.2/CHANGES.1.1.2.md     |   6 +
 .../release/1.1.2/RELEASENOTES.1.1.2.md         |   2 +-
 .../markdown/release/1.1.3/CHANGES.1.1.3.md     |   8 +-
 .../release/1.1.3/RELEASENOTES.1.1.3.md         |   2 +-
 .../markdown/release/1.2.0/CHANGES.1.2.0.md     |   6 +
 .../release/1.2.0/RELEASENOTES.1.2.0.md         |   4 +-
 .../markdown/release/1.2.1/CHANGES.1.2.1.md     |   6 +
 .../release/1.2.1/RELEASENOTES.1.2.1.md         |   2 +-
 .../markdown/release/1.2.2/CHANGES.1.2.2.md     |   8 +-
 .../release/1.2.2/RELEASENOTES.1.2.2.md         |   2 +-
 .../markdown/release/1.3.0/CHANGES.1.3.0.md     |   8 +-
 .../release/1.3.0/RELEASENOTES.1.3.0.md         |   2 +-
 .../release/2.0.0-alpha/CHANGES.2.0.0-alpha.md  |   6 +
 .../2.0.0-alpha/RELEASENOTES.2.0.0-alpha.md     |   2 +-
 .../release/2.0.1-alpha/CHANGES.2.0.1-alpha.md  |   6 +
 .../2.0.1-alpha/RELEASENOTES.2.0.1-alpha.md     |   2 +-
 .../release/2.0.2-alpha/CHANGES.2.0.2-alpha.md  |   6 +
 .../2.0.2-alpha/RELEASENOTES.2.0.2-alpha.md     |   2 +-
 .../release/2.0.3-alpha/CHANGES.2.0.3-alpha.md  |   6 +
 .../2.0.3-alpha/RELEASENOTES.2.0.3-alpha.md     |   4 +-
 .../release/2.0.4-alpha/CHANGES.2.0.4-alpha.md  |   6 +
 .../2.0.4-alpha/RELEASENOTES.2.0.4-alpha.md     |   2 +-
 .../release/2.0.5-alpha/CHANGES.2.0.5-alpha.md  |   6 +
 .../2.0.5-alpha/RELEASENOTES.2.0.5-alpha.md     |   2 +-
 .../release/2.0.6-alpha/CHANGES.2.0.6-alpha.md  |   6 +
 .../2.0.6-alpha/RELEASENOTES.2.0.6-alpha.md     |   2 +-
 .../release/2.1.0-beta/CHANGES.2.1.0-beta.md    |   6 +
 .../2.1.0-beta/RELEASENOTES.2.1.0-beta.md       |   2 +-
 .../release/2.1.1-beta/CHANGES.2.1.1-beta.md    |   6 +
 .../2.1.1-beta/RELEASENOTES.2.1.1-beta.md       |   2 +-
 .../markdown/release/2.2.0/CHANGES.2.2.0.md     |   6 +
 .../release/2.2.0/RELEASENOTES.2.2.0.md         |   2 +-
 .../markdown/release/2.2.1/CHANGES.2.2.1.md     |   8 +-
 .../release/2.2.1/RELEASENOTES.2.2.1.md         |   2 +-
 .../markdown/release/2.3.0/CHANGES.2.3.0.md     |  13 +-
 .../release/2.3.0/RELEASENOTES.2.3.0.md         |   3 +-
 .../markdown/release/2.4.0/CHANGES.2.4.0.md     |   7 +
 .../release/2.4.0/RELEASENOTES.2.4.0.md         |   2 +-
 .../markdown/release/2.4.1/CHANGES.2.4.1.md     |   6 +
 .../release/2.4.1/RELEASENOTES.2.4.1.md         |   2 +-
 .../markdown/release/2.5.0/CHANGES.2.5.0.md     |   8 +-
 .../release/2.5.0/RELEASENOTES.2.5.0.md         |  16 +-
 .../markdown/release/2.5.1/CHANGES.2.5.1.md     |   6 +
 .../release/2.5.1/RELEASENOTES.2.5.1.md         |   2 +-
 .../markdown/release/2.5.2/CHANGES.2.5.2.md     |   6 +
 .../release/2.5.2/RELEASENOTES.2.5.2.md         |   2 +-
 .../markdown/release/2.6.0/CHANGES.2.6.0.md     |  11 +-
 .../release/2.6.0/RELEASENOTES.2.6.0.md         |  37 +--
 .../markdown/release/2.6.1/CHANGES.2.6.1.md     | 228 +++++++++++++++++++
 .../release/2.6.1/RELEASENOTES.2.6.1.md         |  45 ++++
 .../markdown/release/2.6.2/CHANGES.2.6.2.md     |  83 +++++++
 .../release/2.6.2/RELEASENOTES.2.6.2.md         |  21 ++
 .../markdown/release/2.6.3/CHANGES.2.6.3.md     | 103 +++++++++
 .../release/2.6.3/RELEASENOTES.2.6.3.md         |  21 ++
 .../markdown/release/2.6.4/CHANGES.2.6.4.md     | 114 ++++++++++
 .../release/2.6.4/RELEASENOTES.2.6.4.md         |  28 +++
 .../markdown/release/2.7.0/CHANGES.2.7.0.md     |  16 +-
 .../release/2.7.0/RELEASENOTES.2.7.0.md         |  83 ++-----
 .../markdown/release/2.7.1/CHANGES.2.7.1.md     |  17 +-
 .../release/2.7.1/RELEASENOTES.2.7.1.md         |   7 +-
 .../markdown/release/2.7.2/CHANGES.2.7.2.md     | 224 ++++++++++++++++++
 .../release/2.7.2/RELEASENOTES.2.7.2.md         |  35 +++
 238 files changed, 1787 insertions(+), 296 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.1.0/CHANGES.0.1.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.1.0/CHANGES.0.1.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.1.0/CHANGES.0.1.0.md
index 434d7c3..38c4b90 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.1.0/CHANGES.0.1.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.1.0/CHANGES.0.1.0.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.1.0/RELEASENOTES.0.1.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.1.0/RELEASENOTES.0.1.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.1.0/RELEASENOTES.0.1.0.md
index d2b896d..0e4069b 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.1.0/RELEASENOTES.0.1.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.1.0/RELEASENOTES.0.1.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.1.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.1.1/CHANGES.0.1.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.1.1/CHANGES.0.1.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.1.1/CHANGES.0.1.1.md
index 21dfefc..a40faad 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.1.1/CHANGES.0.1.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.1.1/CHANGES.0.1.1.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.1.1/RELEASENOTES.0.1.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.1.1/RELEASENOTES.0.1.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.1.1/RELEASENOTES.0.1.1.md
index 213673c..b40e8ca 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.1.1/RELEASENOTES.0.1.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.1.1/RELEASENOTES.0.1.1.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.1.1 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.10.0/CHANGES.0.10.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.10.0/CHANGES.0.10.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.10.0/CHANGES.0.10.0.md
index b09a09e..c0cda89 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.10.0/CHANGES.0.10.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.10.0/CHANGES.0.10.0.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.10.0/RELEASENOTES.0.10.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.10.0/RELEASENOTES.0.10.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.10.0/RELEASENOTES.0.10.0.md
index 63ab348..c0d13df 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.10.0/RELEASENOTES.0.10.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.10.0/RELEASENOTES.0.10.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.10.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.10.1/CHANGES.0.10.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.10.1/CHANGES.0.10.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.10.1/CHANGES.0.10.1.md
index dc0018d..0e97344 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.10.1/CHANGES.0.10.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.10.1/CHANGES.0.10.1.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.10.1/RELEASENOTES.0.10.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.10.1/RELEASENOTES.0.10.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.10.1/RELEASENOTES.0.10.1.md
index 15a0fd5..a672981 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.10.1/RELEASENOTES.0.10.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.10.1/RELEASENOTES.0.10.1.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.10.1 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.11.0/CHANGES.0.11.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.11.0/CHANGES.0.11.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.11.0/CHANGES.0.11.0.md
index ccd4565..fddbe01 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.11.0/CHANGES.0.11.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.11.0/CHANGES.0.11.0.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.11.0/RELEASENOTES.0.11.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.11.0/RELEASENOTES.0.11.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.11.0/RELEASENOTES.0.11.0.md
index 85d59dc..ba004ee 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.11.0/RELEASENOTES.0.11.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.11.0/RELEASENOTES.0.11.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.11.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.11.1/CHANGES.0.11.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.11.1/CHANGES.0.11.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.11.1/CHANGES.0.11.1.md
index 2a01ac4..87afd44 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.11.1/CHANGES.0.11.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.11.1/CHANGES.0.11.1.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.11.1/RELEASENOTES.0.11.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.11.1/RELEASENOTES.0.11.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.11.1/RELEASENOTES.0.11.1.md
index 119b8d2..6fb40d7 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.11.1/RELEASENOTES.0.11.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.11.1/RELEASENOTES.0.11.1.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.11.1 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.11.2/CHANGES.0.11.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.11.2/CHANGES.0.11.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.11.2/CHANGES.0.11.2.md
index 669831d..0f3e7cd 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.11.2/CHANGES.0.11.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.11.2/CHANGES.0.11.2.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.11.2/RELEASENOTES.0.11.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.11.2/RELEASENOTES.0.11.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.11.2/RELEASENOTES.0.11.2.md
index 24da147..9a65d15 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.11.2/RELEASENOTES.0.11.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.11.2/RELEASENOTES.0.11.2.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.11.2 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.0/CHANGES.0.12.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.0/CHANGES.0.12.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.0/CHANGES.0.12.0.md
index bcac772..40e402c 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.0/CHANGES.0.12.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.0/CHANGES.0.12.0.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.0/RELEASENOTES.0.12.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.0/RELEASENOTES.0.12.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.0/RELEASENOTES.0.12.0.md
index e8d86be..88d6d59 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.0/RELEASENOTES.0.12.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.0/RELEASENOTES.0.12.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.12.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.1/CHANGES.0.12.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.1/CHANGES.0.12.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.1/CHANGES.0.12.1.md
index 0c69aad..74a17c6 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.1/CHANGES.0.12.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.1/CHANGES.0.12.1.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.1/RELEASENOTES.0.12.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.1/RELEASENOTES.0.12.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.1/RELEASENOTES.0.12.1.md
index 5051099..46739bc 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.1/RELEASENOTES.0.12.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.1/RELEASENOTES.0.12.1.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.12.1 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.2/CHANGES.0.12.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.2/CHANGES.0.12.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.2/CHANGES.0.12.2.md
index f473cbf..e70e14b 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.2/CHANGES.0.12.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.2/CHANGES.0.12.2.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.2/RELEASENOTES.0.12.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.2/RELEASENOTES.0.12.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.2/RELEASENOTES.0.12.2.md
index dffca2b..f1fa65a 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.2/RELEASENOTES.0.12.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.2/RELEASENOTES.0.12.2.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.12.2 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.3/CHANGES.0.12.3.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.3/CHANGES.0.12.3.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.3/CHANGES.0.12.3.md
index 62a8420..0bba5ae 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.3/CHANGES.0.12.3.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.3/CHANGES.0.12.3.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.3/RELEASENOTES.0.12.3.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.3/RELEASENOTES.0.12.3.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.3/RELEASENOTES.0.12.3.md
index a9cb651..923c7cc 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.3/RELEASENOTES.0.12.3.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.12.3/RELEASENOTES.0.12.3.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.12.3 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.13.0/CHANGES.0.13.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.13.0/CHANGES.0.13.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.13.0/CHANGES.0.13.0.md
index 5dcd5c6..d79fb5e 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.13.0/CHANGES.0.13.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.13.0/CHANGES.0.13.0.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.13.0/RELEASENOTES.0.13.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.13.0/RELEASENOTES.0.13.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.13.0/RELEASENOTES.0.13.0.md
index 41c18bd..f404223 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.13.0/RELEASENOTES.0.13.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.13.0/RELEASENOTES.0.13.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.13.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.0/CHANGES.0.14.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.0/CHANGES.0.14.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.0/CHANGES.0.14.0.md
index 541f4c9..c432600 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.0/CHANGES.0.14.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.0/CHANGES.0.14.0.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.0/RELEASENOTES.0.14.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.0/RELEASENOTES.0.14.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.0/RELEASENOTES.0.14.0.md
index 9d34985..bc985e8 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.0/RELEASENOTES.0.14.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.0/RELEASENOTES.0.14.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.14.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.1/CHANGES.0.14.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.1/CHANGES.0.14.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.1/CHANGES.0.14.1.md
index 21b297a..01cff1d 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.1/CHANGES.0.14.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.1/CHANGES.0.14.1.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.1/RELEASENOTES.0.14.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.1/RELEASENOTES.0.14.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.1/RELEASENOTES.0.14.1.md
index 40f7a45..929d9d9 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.1/RELEASENOTES.0.14.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.1/RELEASENOTES.0.14.1.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.14.1 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.2/CHANGES.0.14.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.2/CHANGES.0.14.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.2/CHANGES.0.14.2.md
index dc53828..5e7d774 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.2/CHANGES.0.14.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.2/CHANGES.0.14.2.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.2/RELEASENOTES.0.14.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.2/RELEASENOTES.0.14.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.2/RELEASENOTES.0.14.2.md
index 33e5578..75b45f0 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.2/RELEASENOTES.0.14.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.2/RELEASENOTES.0.14.2.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.14.2 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.3/CHANGES.0.14.3.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.3/CHANGES.0.14.3.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.3/CHANGES.0.14.3.md
index afc35b2..e418744 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.3/CHANGES.0.14.3.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.3/CHANGES.0.14.3.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.3/RELEASENOTES.0.14.3.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.3/RELEASENOTES.0.14.3.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.3/RELEASENOTES.0.14.3.md
index 222e0a0..249a95b 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.3/RELEASENOTES.0.14.3.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.3/RELEASENOTES.0.14.3.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.14.3 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.4/CHANGES.0.14.4.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.4/CHANGES.0.14.4.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.4/CHANGES.0.14.4.md
index c2f17c5..f6bf9f1 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.4/CHANGES.0.14.4.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.4/CHANGES.0.14.4.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.4/RELEASENOTES.0.14.4.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.4/RELEASENOTES.0.14.4.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.4/RELEASENOTES.0.14.4.md
index 2743082..b149d55 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.4/RELEASENOTES.0.14.4.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.14.4/RELEASENOTES.0.14.4.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.14.4 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.0/CHANGES.0.15.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.0/CHANGES.0.15.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.0/CHANGES.0.15.0.md
index e2d136b..cb85716 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.0/CHANGES.0.15.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.0/CHANGES.0.15.0.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.0/RELEASENOTES.0.15.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.0/RELEASENOTES.0.15.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.0/RELEASENOTES.0.15.0.md
index ea88f02..f7177ec 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.0/RELEASENOTES.0.15.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.0/RELEASENOTES.0.15.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.15.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.1/CHANGES.0.15.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.1/CHANGES.0.15.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.1/CHANGES.0.15.1.md
index 670e22a..acadf7d 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.1/CHANGES.0.15.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.1/CHANGES.0.15.1.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.1/RELEASENOTES.0.15.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.1/RELEASENOTES.0.15.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.1/RELEASENOTES.0.15.1.md
index 05d175f..33b73c8 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.1/RELEASENOTES.0.15.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.1/RELEASENOTES.0.15.1.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.15.1 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.2/CHANGES.0.15.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.2/CHANGES.0.15.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.2/CHANGES.0.15.2.md
index 10fc49e..c046da1 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.2/CHANGES.0.15.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.2/CHANGES.0.15.2.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.2/RELEASENOTES.0.15.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.2/RELEASENOTES.0.15.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.2/RELEASENOTES.0.15.2.md
index 801d958..3dfa438 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.2/RELEASENOTES.0.15.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.2/RELEASENOTES.0.15.2.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.15.2 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.3/CHANGES.0.15.3.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.3/CHANGES.0.15.3.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.3/CHANGES.0.15.3.md
index 9c15db6..4967b9f 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.3/CHANGES.0.15.3.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.3/CHANGES.0.15.3.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.3/RELEASENOTES.0.15.3.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.3/RELEASENOTES.0.15.3.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.3/RELEASENOTES.0.15.3.md
index c5e628d..7fe66fa 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.3/RELEASENOTES.0.15.3.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.3/RELEASENOTES.0.15.3.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.15.3 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.4/CHANGES.0.15.4.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.4/CHANGES.0.15.4.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.4/CHANGES.0.15.4.md
index 41aa5f0..8cd3160 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.4/CHANGES.0.15.4.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.4/CHANGES.0.15.4.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop Changelog
 
-## Release 0.15.4 - Unreleased
+## Release 0.15.4 - Unreleased (as of 2016-03-04)
 
 ### INCOMPATIBLE CHANGES:
 
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.4/RELEASENOTES.0.15.4.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.4/RELEASENOTES.0.15.4.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.4/RELEASENOTES.0.15.4.md
index 679f0d3..1bdd2ab 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.4/RELEASENOTES.0.15.4.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.15.4/RELEASENOTES.0.15.4.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.15.4 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.0/CHANGES.0.16.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.0/CHANGES.0.16.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.0/CHANGES.0.16.0.md
index 1422bdc..4be7a96 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.0/CHANGES.0.16.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.0/CHANGES.0.16.0.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.0/RELEASENOTES.0.16.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.0/RELEASENOTES.0.16.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.0/RELEASENOTES.0.16.0.md
index e0ee479..377a6c2 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.0/RELEASENOTES.0.16.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.0/RELEASENOTES.0.16.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.16.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.1/CHANGES.0.16.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.1/CHANGES.0.16.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.1/CHANGES.0.16.1.md
index 4c403be..c989fd7 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.1/CHANGES.0.16.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.1/CHANGES.0.16.1.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.1/RELEASENOTES.0.16.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.1/RELEASENOTES.0.16.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.1/RELEASENOTES.0.16.1.md
index 39257cc..51019be 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.1/RELEASENOTES.0.16.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.1/RELEASENOTES.0.16.1.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.16.1 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.2/CHANGES.0.16.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.2/CHANGES.0.16.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.2/CHANGES.0.16.2.md
index 1c528f4..dee3d74 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.2/CHANGES.0.16.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.2/CHANGES.0.16.2.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.2/RELEASENOTES.0.16.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.2/RELEASENOTES.0.16.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.2/RELEASENOTES.0.16.2.md
index 2ba4ee4..a63c10f 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.2/RELEASENOTES.0.16.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.2/RELEASENOTES.0.16.2.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.16.2 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.3/CHANGES.0.16.3.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.3/CHANGES.0.16.3.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.3/CHANGES.0.16.3.md
index 955623b..b4552bb 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.3/CHANGES.0.16.3.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.3/CHANGES.0.16.3.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.3/RELEASENOTES.0.16.3.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.3/RELEASENOTES.0.16.3.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.3/RELEASENOTES.0.16.3.md
index ae78110..0b5814c 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.3/RELEASENOTES.0.16.3.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.3/RELEASENOTES.0.16.3.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.16.3 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.4/CHANGES.0.16.4.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.4/CHANGES.0.16.4.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.4/CHANGES.0.16.4.md
index ae027ce..3eac7ed 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.4/CHANGES.0.16.4.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.4/CHANGES.0.16.4.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.4/RELEASENOTES.0.16.4.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.4/RELEASENOTES.0.16.4.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.4/RELEASENOTES.0.16.4.md
index fb5fc83..a3d4c8c 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.4/RELEASENOTES.0.16.4.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.16.4/RELEASENOTES.0.16.4.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.16.4 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.0/CHANGES.0.17.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.0/CHANGES.0.17.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.0/CHANGES.0.17.0.md
index bf0fd32..bbf3d23 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.0/CHANGES.0.17.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.0/CHANGES.0.17.0.md
@@ -61,6 +61,12 @@
 | [HADOOP-771](https://issues.apache.org/jira/browse/HADOOP-771) | Namenode should return error when trying to delete non-empty directory |  Major | . | Milind Bhandarkar | Mahadev konar |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.0/RELEASENOTES.0.17.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.0/RELEASENOTES.0.17.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.0/RELEASENOTES.0.17.0.md
index 467f2ac..c8c2794 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.0/RELEASENOTES.0.17.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.0/RELEASENOTES.0.17.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.17.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---
@@ -416,7 +416,7 @@ Change DFS block placement to allocate the first replica locally, the second off
 
 * [HADOOP-2551](https://issues.apache.org/jira/browse/HADOOP-2551) | *Blocker* | **hadoop-env.sh needs finer granularity**
 
-New environment variables were introduced to allow finer grained control of Java options passed to server and client JVMs.  See the new *\_OPTS variables in conf/hadoop-env.sh.
+New environment variables were introduced to allow finer grained control of Java options passed to server and client JVMs.  See the new \*\_OPTS variables in conf/hadoop-env.sh.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.1/CHANGES.0.17.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.1/CHANGES.0.17.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.1/CHANGES.0.17.1.md
index 991cbd7..f69eb78 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.1/CHANGES.0.17.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.1/CHANGES.0.17.1.md
@@ -27,6 +27,12 @@
 | [HADOOP-3565](https://issues.apache.org/jira/browse/HADOOP-3565) | JavaSerialization can throw java.io.StreamCorruptedException |  Major | . | Tom White | Tom White |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.1/RELEASENOTES.0.17.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.1/RELEASENOTES.0.17.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.1/RELEASENOTES.0.17.1.md
index 7cc43a3..ee48400 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.1/RELEASENOTES.0.17.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.1/RELEASENOTES.0.17.1.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.17.1 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.2/CHANGES.0.17.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.2/CHANGES.0.17.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.2/CHANGES.0.17.2.md
index 2ee5df6..db3eac4 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.2/CHANGES.0.17.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.2/CHANGES.0.17.2.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.2/RELEASENOTES.0.17.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.2/RELEASENOTES.0.17.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.2/RELEASENOTES.0.17.2.md
index 22c90b8..27e4924 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.2/RELEASENOTES.0.17.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.2/RELEASENOTES.0.17.2.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.17.2 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.3/CHANGES.0.17.3.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.3/CHANGES.0.17.3.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.3/CHANGES.0.17.3.md
index 1b8a3ed..4b7b7b1 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.3/CHANGES.0.17.3.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.3/CHANGES.0.17.3.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop Changelog
 
-## Release 0.17.3 - Unreleased
+## Release 0.17.3 - Unreleased (as of 2016-03-04)
 
 ### INCOMPATIBLE CHANGES:
 
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.3/RELEASENOTES.0.17.3.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.3/RELEASENOTES.0.17.3.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.3/RELEASENOTES.0.17.3.md
index dd01926..ada8b27 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.3/RELEASENOTES.0.17.3.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.17.3/RELEASENOTES.0.17.3.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.17.3 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.0/CHANGES.0.18.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.0/CHANGES.0.18.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.0/CHANGES.0.18.0.md
index 7df47c0..202f434 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.0/CHANGES.0.18.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.0/CHANGES.0.18.0.md
@@ -65,6 +65,12 @@
 | [HADOOP-544](https://issues.apache.org/jira/browse/HADOOP-544) | Replace the job, tip and task ids with objects. |  Major | . | Owen O'Malley | Enis Soztutar |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.0/RELEASENOTES.0.18.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.0/RELEASENOTES.0.18.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.0/RELEASENOTES.0.18.0.md
index 81278d1..f57c602 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.0/RELEASENOTES.0.18.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.0/RELEASENOTES.0.18.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.18.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.1/CHANGES.0.18.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.1/CHANGES.0.18.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.1/CHANGES.0.18.1.md
index 8fa57cb..dec3ae1 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.1/CHANGES.0.18.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.1/CHANGES.0.18.1.md
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.1/RELEASENOTES.0.18.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.1/RELEASENOTES.0.18.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.1/RELEASENOTES.0.18.1.md
index 1367443..e9d1c06 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.1/RELEASENOTES.0.18.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.1/RELEASENOTES.0.18.1.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.18.1 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.2/CHANGES.0.18.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.2/CHANGES.0.18.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.2/CHANGES.0.18.2.md
index 774cfca..d7e6ac7 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.2/CHANGES.0.18.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.2/CHANGES.0.18.2.md
@@ -27,6 +27,12 @@
 | [HADOOP-4228](https://issues.apache.org/jira/browse/HADOOP-4228) | dfs datanode metrics, bytes\_read, bytes\_written overflows due to incorrect type used. |  Blocker | metrics | Eric Yang | Eric Yang |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.2/RELEASENOTES.0.18.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.2/RELEASENOTES.0.18.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.2/RELEASENOTES.0.18.2.md
index a9ec7c3..e808cd3 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.2/RELEASENOTES.0.18.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.2/RELEASENOTES.0.18.2.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.18.2 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.3/CHANGES.0.18.3.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.3/CHANGES.0.18.3.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.3/CHANGES.0.18.3.md
index 97d6476..7c33c5e 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.3/CHANGES.0.18.3.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.3/CHANGES.0.18.3.md
@@ -29,6 +29,12 @@
 | [HADOOP-4061](https://issues.apache.org/jira/browse/HADOOP-4061) | Large number of decommission freezes the Namenode |  Major | . | Koji Noguchi | Tsz Wo Nicholas Sze |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.3/RELEASENOTES.0.18.3.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.3/RELEASENOTES.0.18.3.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.3/RELEASENOTES.0.18.3.md
index 7c767da..4dd9c06 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.3/RELEASENOTES.0.18.3.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.3/RELEASENOTES.0.18.3.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.18.3 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.4/CHANGES.0.18.4.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.4/CHANGES.0.18.4.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.4/CHANGES.0.18.4.md
index b5cb699..da4f4c1 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.4/CHANGES.0.18.4.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.4/CHANGES.0.18.4.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop Changelog
 
-## Release 0.18.4 - Unreleased
+## Release 0.18.4 - Unreleased (as of 2016-03-04)
 
 ### INCOMPATIBLE CHANGES:
 
@@ -26,6 +26,12 @@
 |:---- |:---- | :--- |:---- |:---- |:---- |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.4/RELEASENOTES.0.18.4.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.4/RELEASENOTES.0.18.4.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.4/RELEASENOTES.0.18.4.md
index 725c5c4..732ff7f 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.4/RELEASENOTES.0.18.4.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.18.4/RELEASENOTES.0.18.4.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.18.4 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.19.0/CHANGES.0.19.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.19.0/CHANGES.0.19.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.19.0/CHANGES.0.19.0.md
index 7b62b09..29e9ab9 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.19.0/CHANGES.0.19.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.19.0/CHANGES.0.19.0.md
@@ -51,6 +51,12 @@
 | [HADOOP-1700](https://issues.apache.org/jira/browse/HADOOP-1700) | Append to files in HDFS |  Major | . | stack | dhruba borthakur |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.19.0/RELEASENOTES.0.19.0.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.19.0/RELEASENOTES.0.19.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.19.0/RELEASENOTES.0.19.0.md
index 2348e76..187b087 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.19.0/RELEASENOTES.0.19.0.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.19.0/RELEASENOTES.0.19.0.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.19.0 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---
@@ -304,7 +304,7 @@ Adds a new contrib, bash-tab-completion, which enables bash tab completion for t
 
 * [HADOOP-3702](https://issues.apache.org/jira/browse/HADOOP-3702) | *Major* | **add support for chaining Maps in a single Map and after a Reduce [M\*/RM\*]**
 
-Introduced ChainMapper and the ChainReducer classes to allow composing chains of Maps and Reduces in a single Map/Reduce job, something like MAP+ REDUCE MAP*.
+Introduced ChainMapper and the ChainReducer classes to allow composing chains of Maps and Reduces in a single Map/Reduce job, something like MAP+ REDUCE MAP\*.
 
 
 ---
@@ -474,7 +474,7 @@ Introduced LZOP codec.
 
 * [HADOOP-2411](https://issues.apache.org/jira/browse/HADOOP-2411) | *Major* | **Add support for larger EC2 instance types**
 
-Added support for c1.* instance types and associated kernels for EC2.
+Added support for c1.\* instance types and associated kernels for EC2.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.19.1/CHANGES.0.19.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.19.1/CHANGES.0.19.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.19.1/CHANGES.0.19.1.md
index 275ea5b..fcc53f1 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.19.1/CHANGES.0.19.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.19.1/CHANGES.0.19.1.md
@@ -29,6 +29,12 @@
 | [HADOOP-4061](https://issues.apache.org/jira/browse/HADOOP-4061) | Large number of decommission freezes the Namenode |  Major | . | Koji Noguchi | Tsz Wo Nicholas Sze |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.19.1/RELEASENOTES.0.19.1.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.19.1/RELEASENOTES.0.19.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.19.1/RELEASENOTES.0.19.1.md
index 25984a4..f236710 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.19.1/RELEASENOTES.0.19.1.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.19.1/RELEASENOTES.0.19.1.md
@@ -18,7 +18,7 @@
 -->
 # Apache Hadoop  0.19.1 Release Notes
 
-These release notes cover new developer and user-facing incompatibilities, features, and major improvements.
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
 
 
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f73a1c/hadoop-common-project/hadoop-common/src/site/markdown/release/0.19.2/CHANGES.0.19.2.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.19.2/CHANGES.0.19.2.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.19.2/CHANGES.0.19.2.md
index 0a823a2..2071806 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/release/0.19.2/CHANGES.0.19.2.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/0.19.2/CHANGES.0.19.2.md
@@ -27,6 +27,12 @@
 | [HADOOP-5332](https://issues.apache.org/jira/browse/HADOOP-5332) | Make support for file append API configurable |  Blocker | . | Nigel Daley | dhruba borthakur |
 
 
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
 ### NEW FEATURES:
 
 | JIRA | Summary | Priority | Component | Reporter | Contributor |


[17/34] hadoop git commit: HADOOP-12860. Expand section "Data Encryption on HTTP" in SecureMode documentation. Contributed by Wei-Chiu Chuang.

Posted by ar...@apache.org.
HADOOP-12860. Expand section "Data Encryption on HTTP" in SecureMode documentation. Contributed by Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f86850b5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f86850b5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f86850b5

Branch: refs/heads/HDFS-1312
Commit: f86850b544dcb34ee3c9336fad584309e886dbed
Parents: c2140d0
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue Mar 8 14:28:47 2016 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue Mar 8 14:28:47 2016 +0900

----------------------------------------------------------------------
 .../src/site/markdown/SecureMode.md             | 27 +++++++++++++-------
 .../src/site/markdown/TimelineServer.md         |  2 +-
 2 files changed, 19 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f86850b5/hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md b/hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md
index adc3a93..d206b53 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md
@@ -195,6 +195,13 @@ AES offers the greatest cryptographic strength and the best performance. At this
 
 Data transfer between Web-console and clients are protected by using SSL(HTTPS). SSL configuration is recommended but not required to configure Hadoop security with Kerberos.
 
+To enable SSL for web console of HDFS daemons, set `dfs.http.policy` to either `HTTPS_ONLY` or `HTTP_AND_HTTPS` in hdfs-site.xml.
+Note that this does not affect KMS nor HttpFS, as they are implemented on top of Tomcat and do not respect this parameter. See [Hadoop KMS](../../hadoop-kms/index.html) and [Hadoop HDFS over HTTP - Server Setup](../../hadoop-hdfs-httpfs/ServerSetup.html) for instructions on enabling KMS over HTTPS and HttpFS over HTTPS, respectively.
+
+To enable SSL for web console of YARN daemons, set `yarn.http.policy` to `HTTPS_ONLY` in yarn-site.xml.
+
+To enable SSL for web console of MapReduce JobHistory server, set `mapreduce.jobhistory.http.policy` to `HTTPS_ONLY` in mapred-site.xml.
+
 Configuration
 -------------
 
@@ -249,19 +256,18 @@ The following settings allow configuring SSL access to the NameNode web UI (opti
 | Parameter                    | Value                                           | Notes                                                                                                                                                                                                                                                                                                                                                                                              |
 |:-----------------------------|:------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
 | `dfs.http.policy`            | `HTTP_ONLY` or `HTTPS_ONLY` or `HTTP_AND_HTTPS` | `HTTPS_ONLY` turns off http access. This option takes precedence over the deprecated configuration dfs.https.enable and hadoop.ssl.enabled. If using SASL to authenticate data transfer protocol instead of running DataNode as root and using privileged ports, then this property must be set to `HTTPS_ONLY` to guarantee authentication of HTTP servers. (See `dfs.data.transfer.protection`.) |
-| `dfs.namenode.https-address` | `nn_host_fqdn:50470`                            |                                                                                                                                                                                                                                                                                                                                                                                                    |
-| `dfs.https.port`             | `50470`                                         |                                                                                                                                                                                                                                                                                                                                                                                                    |
+| `dfs.namenode.https-address` | `0.0.0.0:50470`                                 | This parameter is used in non-HA mode and without federation. See [HDFS High Availability](../hadoop-hdfs/HDFSHighAvailabilityWithNFS.html#Deployment) and [HDFS Federation](../hadoop-hdfs/Federation.html#Federation_Configuration) for details.                                                                                                                                                 |
 | `dfs.https.enable`           | `true`                                          | This value is deprecated. `Use dfs.http.policy`                                                                                                                                                                                                                                                                                                                                                    |
 
 ### Secondary NameNode
 
 | Parameter                                                   | Value                                    | Notes                                                                                                                                                                                                                                                                                                                                                                                                                                                                            |
 |:------------------------------------------------------------|:-----------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `dfs.namenode.secondary.http-address`                       | `snn_host_fqdn:50090`                    |                                                                                                                                                                                                                                                                                                                                                                                                                                                                                  |
+| `dfs.namenode.secondary.http-address`                       | `0.0.0.0:50090`                          | HTTP web UI address for the Secondary NameNode.                                                                                                                                                                                                                                                                                                                                                                                                                                  |
+| `dfs.namenode.secondary.https-address`                      | `0.0.0.0:50091`                          | HTTPS web UI address for the Secondary NameNode.                                                                                                                                                                                                                                                                                                                                                                                                                                 |
 | `dfs.secondary.namenode.keytab.file`                        | `/etc/security/keytab/sn.service.keytab` | Kerberos keytab file for the Secondary NameNode.                                                                                                                                                                                                                                                                                                                                                                                                                                 |
 | `dfs.secondary.namenode.kerberos.principal`                 | `sn/_HOST@REALM.TLD`                     | Kerberos principal name for the Secondary NameNode.                                                                                                                                                                                                                                                                                                                                                                                                                              |
 | `dfs.secondary.namenode.kerberos.internal.spnego.principal` | `HTTP/_HOST@REALM.TLD`                   | The server principal used by the Secondary NameNode for web UI SPNEGO authentication. The SPNEGO server principal begins with the prefix `HTTP/` by convention. If the value is `'*'`, the web server will attempt to login with every principal specified in the keytab file `dfs.web.authentication.kerberos.keytab`. For most deployments this can be set to `${dfs.web.authentication.kerberos.principal}` i.e use the value of `dfs.web.authentication.kerberos.principal`. |
-| `dfs.namenode.secondary.https-port`                         | `50470`                                  |                                                                                                                                                                                                                                                                                                                                                                                                                                                                                  |
 
 ### JournalNode
 
@@ -271,6 +277,7 @@ The following settings allow configuring SSL access to the NameNode web UI (opti
 | `dfs.journalnode.keytab.file`                        | `/etc/security/keytab/jn.service.keytab`     | Kerberos keytab file for the JournalNode.                                                                                                                                                                                                                                                                                                                                                                                                                                                                   |
 | `dfs.journalnode.kerberos.internal.spnego.principal` | `HTTP/_HOST@REALM.TLD`                       | The server principal used by the JournalNode for web UI SPNEGO authentication when Kerberos security is enabled. The SPNEGO server principal begins with the prefix `HTTP/` by convention. If the value is `'*'`, the web server will attempt to login with every principal specified in the keytab file `dfs.web.authentication.kerberos.keytab`. For most deployments this can be set to `${dfs.web.authentication.kerberos.principal}` i.e use the value of `dfs.web.authentication.kerberos.principal`. |
 | `dfs.web.authentication.kerberos.keytab`             | `/etc/security/keytab/spnego.service.keytab` | SPNEGO keytab file for the JournalNode. In HA clusters this setting is shared with the Name Nodes.                                                                                                                                                                                                                                                                                                                                                                                                          |
+| `dfs.journalnode.https-address`                      | `0.0.0.0:8481`                               | HTTPS web UI address for the JournalNode.                                                                                                                                                                                                                                                                                                                                                                                                                                                                   |
 
 ### DataNode
 
@@ -279,7 +286,7 @@ The following settings allow configuring SSL access to the NameNode web UI (opti
 | `dfs.datanode.data.dir.perm`                     | `700`                                    |                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                              |
 | `dfs.datanode.address`                           | `0.0.0.0:1004`                           | Secure DataNode must use privileged port in order to assure that the server was started securely. This means that the server must be started via jsvc. Alternatively, this must be set to a non-privileged port if using SASL to authenticate data transfer protocol. (See `dfs.data.transfer.protection`.)                                                                                                                                                                                                                  |
 | `dfs.datanode.http.address`                      | `0.0.0.0:1006`                           | Secure DataNode must use privileged port in order to assure that the server was started securely. This means that the server must be started via jsvc.                                                                                                                                                                                                                                                                                                                                                                       |
-| `dfs.datanode.https.address`                     | `0.0.0.0:50470`                          |                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                              |
+| `dfs.datanode.https.address`                     | `0.0.0.0:50475`                          | HTTPS web UI address for the DataNode.                                                                                                                                                                                                                                                                                                                                                                                                                                                                                   |
 | `dfs.datanode.kerberos.principal`                | `dn/_HOST@REALM.TLD`                     | Kerberos principal name for the DataNode.                                                                                                                                                                                                                                                                                                                                                                                                                                                                                    |
 | `dfs.datanode.keytab.file`                       | `/etc/security/keytab/dn.service.keytab` | Kerberos keytab file for the DataNode.                                                                                                                                                                                                                                                                                                                                                                                                                                                                                       |
 | `dfs.encrypt.data.transfer`                      | `false`                                  | set to `true` when using data encryption                                                                                                                                                                                                                                                                                                                                                                                                                                                                                     |
@@ -297,10 +304,11 @@ The following settings allow configuring SSL access to the NameNode web UI (opti
 
 ### ResourceManager
 
-| Parameter                        | Value                                    | Notes                                            |
-|:---------------------------------|:-----------------------------------------|:-------------------------------------------------|
-| `yarn.resourcemanager.principal` | `rm/_HOST@REALM.TLD`                     | Kerberos principal name for the ResourceManager. |
-| `yarn.resourcemanager.keytab`    | `/etc/security/keytab/rm.service.keytab` | Kerberos keytab file for the ResourceManager.    |
+| Parameter                                    | Value                                    | Notes                                                                                                                                                                                                                                                                                     |
+|:---------------------------------------------|:-----------------------------------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `yarn.resourcemanager.principal`             | `rm/_HOST@REALM.TLD`                     | Kerberos principal name for the ResourceManager.                                                                                                                                                                                                                                          |
+| `yarn.resourcemanager.keytab`                | `/etc/security/keytab/rm.service.keytab` | Kerberos keytab file for the ResourceManager.                                                                                                                                                                                                                                             |
+| `yarn.resourcemanager.webapp.https.address`  | `${yarn.resourcemanager.hostname}:8090`  | The https address of the RM web application for non-HA. In HA clusters, use `yarn.resourcemanager.webapp.https.address.`*rm-id* for each ResourceManager. See [ResourceManager High Availability](../../hadoop-yarn/hadoop-yarn-site/ResourceManagerHA.html#Configurations) for details. |
 
 ### NodeManager
 
@@ -311,6 +319,7 @@ The following settings allow configuring SSL access to the NameNode web UI (opti
 | `yarn.nodemanager.container-executor.class`       | `org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor` | Use LinuxContainerExecutor.                             |
 | `yarn.nodemanager.linux-container-executor.group` | `hadoop`                                                           | Unix group of the NodeManager.                          |
 | `yarn.nodemanager.linux-container-executor.path`  | `/path/to/bin/container-executor`                                  | The path to the executable of Linux container executor. |
+| `yarn.nodemanager.webapp.https.address`           | `0.0.0.0:8044`                                                     | The https address of the NM web application.           |
 
 ### Configuration for WebAppProxy
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f86850b5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
index b10c860..8ef7d9a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
@@ -176,7 +176,7 @@ and cluster operators.
 
 Note that the selection between the HTTP and HTTPS binding is made in the `TimelineClient` based
 upon the value of the YARN-wide configuration option `yarn.http.policy`; the HTTPS endpoint will be
-selected if this policy is either of `HTTPS_ONLY` or `HTTP_AND_HTTPS`.
+selected if this policy is `HTTPS_ONLY`.
 
 #### Advanced Server-side configuration
 


[04/34] hadoop git commit: MAPREDUCE-6648. Add yarn.app.mapreduce.am.log.level to mapred-default.xml (harsh)

Posted by ar...@apache.org.
MAPREDUCE-6648. Add yarn.app.mapreduce.am.log.level to mapred-default.xml (harsh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4f9fe3ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4f9fe3ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4f9fe3ac

Branch: refs/heads/HDFS-1312
Commit: 4f9fe3ac0370e2903796806ee5276fe8ba93df41
Parents: e1ccc96
Author: Harsh J <ha...@cloudera.com>
Authored: Sat Mar 5 14:26:06 2016 +0530
Committer: Harsh J <ha...@cloudera.com>
Committed: Mon Mar 7 13:09:57 2016 +0530

----------------------------------------------------------------------
 .../src/main/resources/mapred-default.xml                 | 10 ++++++++++
 1 file changed, 10 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f9fe3ac/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index dc5c3dd..da25a99 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -351,6 +351,16 @@
 </property>
 
 <property>
+  <name>yarn.app.mapreduce.am.log.level</name>
+  <value>INFO</value>
+  <description>The logging level for the MR ApplicationMaster. The allowed
+  levels are: OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
+  The setting here could be overridden if "mapreduce.job.log4j-properties-file"
+  is set.
+  </description>
+</property>
+
+<property>
   <name>mapreduce.map.log.level</name>
   <value>INFO</value>
   <description>The logging level for the map task. The allowed levels are:


[06/34] hadoop git commit: YARN-4245. Generalize config file handling in container-executor. Contributed by Sidharta Seethana.

Posted by ar...@apache.org.
YARN-4245. Generalize config file handling in container-executor. Contributed by Sidharta Seethana.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ed2e060
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ed2e060
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ed2e060

Branch: refs/heads/HDFS-1312
Commit: 8ed2e060e80c0def3fcb7604e0bd27c1c24d291e
Parents: e51a8c1
Author: Varun Vasudev <vv...@apache.org>
Authored: Mon Mar 7 16:18:35 2016 +0530
Committer: Varun Vasudev <vv...@apache.org>
Committed: Mon Mar 7 16:18:35 2016 +0530

----------------------------------------------------------------------
 .../container-executor/impl/configuration.c     | 98 ++++++++++----------
 .../container-executor/impl/configuration.h     | 28 +++++-
 .../impl/container-executor.c                   | 27 +++++-
 .../impl/container-executor.h                   | 13 ++-
 .../main/native/container-executor/impl/main.c  |  4 +-
 .../test/test-container-executor.c              |  8 +-
 6 files changed, 112 insertions(+), 66 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ed2e060/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
index 17cce75..3447524 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
@@ -34,34 +34,22 @@
 
 #define MAX_SIZE 10
 
-struct confentry {
-  const char *key;
-  const char *value;
-};
-
-struct configuration {
-  int size;
-  struct confentry **confdetails;
-};
-
-struct configuration config={.size=0, .confdetails=NULL};
-
 //clean up method for freeing configuration
-void free_configurations() {
+void free_configurations(struct configuration *cfg) {
   int i = 0;
-  for (i = 0; i < config.size; i++) {
-    if (config.confdetails[i]->key != NULL) {
-      free((void *)config.confdetails[i]->key);
+  for (i = 0; i < cfg->size; i++) {
+    if (cfg->confdetails[i]->key != NULL) {
+      free((void *)cfg->confdetails[i]->key);
     }
-    if (config.confdetails[i]->value != NULL) {
-      free((void *)config.confdetails[i]->value);
+    if (cfg->confdetails[i]->value != NULL) {
+      free((void *)cfg->confdetails[i]->value);
     }
-    free(config.confdetails[i]);
+    free(cfg->confdetails[i]);
   }
-  if (config.size > 0) {
-    free(config.confdetails);
+  if (cfg->size > 0) {
+    free(cfg->confdetails);
   }
-  config.size = 0;
+  cfg->size = 0;
 }
 
 /**
@@ -133,8 +121,8 @@ int check_configuration_permissions(const char* file_name) {
   return 0;
 }
 
-//function used to load the configurations present in the secure config
-void read_config(const char* file_name) {
+
+void read_config(const char* file_name, struct configuration *cfg) {
   FILE *conf_file;
   char *line;
   char *equaltok;
@@ -152,9 +140,9 @@ void read_config(const char* file_name) {
   #endif
 
   //allocate space for ten configuration items.
-  config.confdetails = (struct confentry **) malloc(sizeof(struct confentry *)
+  cfg->confdetails = (struct confentry **) malloc(sizeof(struct confentry *)
       * MAX_SIZE);
-  config.size = 0;
+  cfg->size = 0;
   conf_file = fopen(file_name, "r");
   if (conf_file == NULL) {
     fprintf(ERRORFILE, "Invalid conf file provided : %s \n", file_name);
@@ -196,9 +184,9 @@ void read_config(const char* file_name) {
       free(line);
       continue;
     }
-    config.confdetails[config.size] = (struct confentry *) malloc(
+    cfg->confdetails[cfg->size] = (struct confentry *) malloc(
             sizeof(struct confentry));
-    if(config.confdetails[config.size] == NULL) {
+    if(cfg->confdetails[cfg->size] == NULL) {
       fprintf(LOGFILE,
           "Failed allocating memory for single configuration item\n");
       goto cleanup;
@@ -208,10 +196,10 @@ void read_config(const char* file_name) {
       fprintf(LOGFILE, "read_config : Adding conf key : %s \n", equaltok);
     #endif
 
-    memset(config.confdetails[config.size], 0, sizeof(struct confentry));
-    config.confdetails[config.size]->key = (char *) malloc(
+    memset(cfg->confdetails[cfg->size], 0, sizeof(struct confentry));
+    cfg->confdetails[cfg->size]->key = (char *) malloc(
             sizeof(char) * (strlen(equaltok)+1));
-    strcpy((char *)config.confdetails[config.size]->key, equaltok);
+    strcpy((char *)cfg->confdetails[cfg->size]->key, equaltok);
     equaltok = strtok_r(NULL, "=", &temp_equaltok);
     if (equaltok == NULL) {
       fprintf(LOGFILE, "configuration tokenization failed \n");
@@ -220,8 +208,8 @@ void read_config(const char* file_name) {
     //means value is commented so don't store the key
     if(equaltok[0] == '#') {
       free(line);
-      free((void *)config.confdetails[config.size]->key);
-      free(config.confdetails[config.size]);
+      free((void *)cfg->confdetails[cfg->size]->key);
+      free(cfg->confdetails[cfg->size]);
       continue;
     }
 
@@ -229,27 +217,29 @@ void read_config(const char* file_name) {
       fprintf(LOGFILE, "read_config : Adding conf value : %s \n", equaltok);
     #endif
 
-    config.confdetails[config.size]->value = (char *) malloc(
+    cfg->confdetails[cfg->size]->value = (char *) malloc(
             sizeof(char) * (strlen(equaltok)+1));
-    strcpy((char *)config.confdetails[config.size]->value, equaltok);
-    if((config.size + 1) % MAX_SIZE  == 0) {
-      config.confdetails = (struct confentry **) realloc(config.confdetails,
-          sizeof(struct confentry **) * (MAX_SIZE + config.size));
-      if (config.confdetails == NULL) {
+    strcpy((char *)cfg->confdetails[cfg->size]->value, equaltok);
+    if((cfg->size + 1) % MAX_SIZE  == 0) {
+      cfg->confdetails = (struct confentry **) realloc(cfg->confdetails,
+          sizeof(struct confentry **) * (MAX_SIZE + cfg->size));
+      if (cfg->confdetails == NULL) {
         fprintf(LOGFILE,
             "Failed re-allocating memory for configuration items\n");
         goto cleanup;
       }
     }
-    if(config.confdetails[config.size] )
-    config.size++;
+    if(cfg->confdetails[cfg->size]) {
+        cfg->size++;
+    }
+
     free(line);
   }
  
   //close the file
   fclose(conf_file);
 
-  if (config.size == 0) {
+  if (cfg->size == 0) {
     fprintf(ERRORFILE, "Invalid configuration provided in %s\n", file_name);
     exit(INVALID_CONFIG_FILE);
   }
@@ -262,7 +252,7 @@ void read_config(const char* file_name) {
     free(line);
   }
   fclose(conf_file);
-  free_configurations();
+  free_configurations(cfg);
   return;
 }
 
@@ -272,11 +262,11 @@ void read_config(const char* file_name) {
  * array, next time onwards used the populated array.
  *
  */
-char * get_value(const char* key) {
+char * get_value(const char* key, struct configuration *cfg) {
   int count;
-  for (count = 0; count < config.size; count++) {
-    if (strcmp(config.confdetails[count]->key, key) == 0) {
-      return strdup(config.confdetails[count]->value);
+  for (count = 0; count < cfg->size; count++) {
+    if (strcmp(cfg->confdetails[count]->key, key) == 0) {
+      return strdup(cfg->confdetails[count]->value);
     }
   }
   return NULL;
@@ -286,11 +276,21 @@ char * get_value(const char* key) {
  * Function to return an array of values for a key.
  * Value delimiter is assumed to be a ','.
  */
-char ** get_values(const char * key) {
-  char *value = get_value(key);
+char ** get_values(const char * key, struct configuration *cfg) {
+  char *value = get_value(key, cfg);
   return extract_values_delim(value, ",");
 }
 
+/**
+ * Function to return an array of values for a key, using the specified
+ delimiter.
+ */
+char ** get_values_delim(const char * key, struct configuration *cfg,
+    const char *delim) {
+  char *value = get_value(key, cfg);
+  return extract_values_delim(value, delim);
+}
+
 char ** extract_values_delim(char *value, const char *delim) {
   char ** toPass = NULL;
   char *tempTok = NULL;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ed2e060/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.h
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.h
index de5cc1d..8f87cb2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.h
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.h
@@ -37,15 +37,33 @@ int check_configuration_permissions(const char* file_name);
  */
 char *resolve_config_path(const char* file_name, const char *root);
 
-// read the given configuration file
-void read_config(const char* config_file);
+// Config data structures.
+struct confentry {
+  const char *key;
+  const char *value;
+};
+
+struct configuration {
+  int size;
+  struct confentry **confdetails;
+};
+
+// read the given configuration file into the specified config struct.
+void read_config(const char* config_file, struct configuration *cfg);
 
 //method exposed to get the configurations
-char *get_value(const char* key);
+char *get_value(const char* key, struct configuration *cfg);
 
 //function to return array of values pointing to the key. Values are
 //comma separated strings.
-char ** get_values(const char* key);
+char ** get_values(const char* key, struct configuration *cfg);
+
+/**
+ * Function to return an array of values for a key, using the specified
+ delimiter.
+ */
+char ** get_values_delim(const char * key, struct configuration *cfg,
+    const char *delim);
 
 // Extracts array of values from the comma separated list of values.
 char ** extract_values(char *value);
@@ -56,7 +74,7 @@ char ** extract_values_delim(char *value, const char *delim);
 void free_values(char** values);
 
 //method to free allocated configuration
-void free_configurations();
+void free_configurations(struct configuration *cfg);
 
 /**
  * If str is a string of the form key=val, find 'key'

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ed2e060/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 44de2bb..c3cc96c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -59,6 +59,8 @@ FILE* ERRORFILE = NULL;
 static uid_t nm_uid = -1;
 static gid_t nm_gid = -1;
 
+struct configuration executor_cfg = {.size=0, .confdetails=NULL};
+
 char *concatenate(char *concat_pattern, char *return_path_name,
    int numArgs, ...);
 
@@ -67,6 +69,21 @@ void set_nm_uid(uid_t user, gid_t group) {
   nm_gid = group;
 }
 
+//function used to load the configurations present in the secure config
+void read_executor_config(const char* file_name) {
+    read_config(file_name, &executor_cfg);
+}
+
+//function used to free executor configuration data
+void free_executor_configurations() {
+    free_configurations(&executor_cfg);
+}
+
+//Lookup nodemanager group from container executor configuration.
+char *get_nodemanager_group() {
+    return get_value(NM_GROUP_KEY, &executor_cfg);
+}
+
 /**
  * get the executable filename.
  */
@@ -658,7 +675,7 @@ static struct passwd* get_user_info(const char* user) {
 }
 
 int is_whitelisted(const char *user) {
-  char **whitelist = get_values(ALLOWED_SYSTEM_USERS_KEY);
+  char **whitelist = get_values(ALLOWED_SYSTEM_USERS_KEY, &executor_cfg);
   char **users = whitelist;
   if (whitelist != NULL) {
     for(; *users; ++users) {
@@ -686,7 +703,7 @@ struct passwd* check_user(const char *user) {
     fflush(LOGFILE);
     return NULL;
   }
-  char *min_uid_str = get_value(MIN_USERID_KEY);
+  char *min_uid_str = get_value(MIN_USERID_KEY, &executor_cfg);
   int min_uid = DEFAULT_MIN_USERID;
   if (min_uid_str != NULL) {
     char *end_ptr = NULL;
@@ -713,7 +730,7 @@ struct passwd* check_user(const char *user) {
     free(user_info);
     return NULL;
   }
-  char **banned_users = get_values(BANNED_USERS_KEY);
+  char **banned_users = get_values(BANNED_USERS_KEY, &executor_cfg);
   banned_users = banned_users == NULL ?
     (char**) DEFAULT_BANNED_USERS : banned_users;
   char **banned_user = banned_users;
@@ -1062,7 +1079,7 @@ char* parse_docker_command_file(const char* command_file) {
 
 int run_docker(const char *command_file) {
   char* docker_command = parse_docker_command_file(command_file);
-  char* docker_binary = get_value(DOCKER_BINARY_KEY);
+  char* docker_binary = get_value(DOCKER_BINARY_KEY, &executor_cfg);
   char* docker_command_with_binary = calloc(sizeof(char), EXECUTOR_PATH_MAX);
   snprintf(docker_command_with_binary, EXECUTOR_PATH_MAX, "%s %s", docker_binary, docker_command);
   char **args = extract_values_delim(docker_command_with_binary, " ");
@@ -1224,7 +1241,7 @@ int launch_docker_container_as_user(const char * user, const char *app_id,
   char buffer[BUFFER_SIZE];
 
   char *docker_command = parse_docker_command_file(command_file);
-  char *docker_binary = get_value(DOCKER_BINARY_KEY);
+  char *docker_binary = get_value(DOCKER_BINARY_KEY, &executor_cfg);
   if (docker_binary == NULL) {
     docker_binary = "docker";
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ed2e060/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
index 57327f0..df5b7d8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
@@ -95,10 +95,15 @@ extern FILE *LOGFILE;
 // the log file for error messages
 extern FILE *ERRORFILE;
 
-
 // get the executable's filename
 char* get_executable();
 
+//function used to load the configurations present in the secure config
+void read_executor_config(const char* file_name);
+
+//Lookup nodemanager group from container executor configuration.
+char *get_nodemanager_group();
+
 /**
  * Check the permissions on the container-executor to make sure that security is
  * permissible. For this, we need container-executor binary to
@@ -111,6 +116,12 @@ char* get_executable();
  */
 int check_executor_permissions(char *executable_file);
 
+//function used to load the configurations present in the secure config.
+void read_executor_config(const char* file_name);
+
+//function used to free executor configuration data
+void free_executor_configurations();
+
 // initialize the application directory
 int initialize_app(const char *user, const char *app_id,
                    const char *credentials, char* const* local_dirs,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ed2e060/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
index ab45c7e..222467a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
@@ -112,11 +112,11 @@ static void assert_valid_setup(char *current_executable) {
     flush_and_close_log_files();
     exit(INVALID_CONFIG_FILE);
   }
-  read_config(conf_file);
+  read_executor_config(conf_file);
   free(conf_file);
 
   // look up the node manager group in the config file
-  char *nm_group = get_value(NM_GROUP_KEY);
+  char *nm_group = get_nodemanager_group();
   if (nm_group == NULL) {
     fprintf(ERRORFILE, "Can't get configured value for %s.\n", NM_GROUP_KEY);
     flush_and_close_log_files();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ed2e060/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
index ab29543..67f0357 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
@@ -873,7 +873,7 @@ int main(int argc, char **argv) {
   if (write_config_file(TEST_ROOT "/test.cfg", 1) != 0) {
     exit(1);
   }
-  read_config(TEST_ROOT "/test.cfg");
+  read_executor_config(TEST_ROOT "/test.cfg");
 
   local_dirs = extract_values(strdup(NM_LOCAL_DIRS));
   log_dirs = extract_values(strdup(NM_LOG_DIRS));
@@ -945,14 +945,14 @@ int main(int argc, char **argv) {
   seteuid(0);
   // test_delete_user must run as root since that's how we use the delete_as_user
   test_delete_user();
-  free_configurations();
+  free_executor_configurations();
 
   printf("\nTrying banned default user()\n");
   if (write_config_file(TEST_ROOT "/test.cfg", 0) != 0) {
     exit(1);
   }
 
-  read_config(TEST_ROOT "/test.cfg");
+  read_executor_config(TEST_ROOT "/test.cfg");
   username = "bin";
   test_check_user(1);
 
@@ -963,6 +963,6 @@ int main(int argc, char **argv) {
   printf("\nFinished tests\n");
 
   free(current_username);
-  free_configurations();
+  free_executor_configurations();
   return 0;
 }


[08/34] hadoop git commit: YARN-4744. Too many signal to container failure in case of LCE. Contributed by Sidharta Seethana

Posted by ar...@apache.org.
YARN-4744. Too many signal to container failure in case of LCE. Contributed by Sidharta Seethana


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/059caf99
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/059caf99
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/059caf99

Branch: refs/heads/HDFS-1312
Commit: 059caf99891943d9587cac19b48e82efbed06b2d
Parents: fd1c09b
Author: Jason Lowe <jl...@apache.org>
Authored: Mon Mar 7 15:40:01 2016 +0000
Committer: Jason Lowe <jl...@apache.org>
Committed: Mon Mar 7 15:40:01 2016 +0000

----------------------------------------------------------------------
 .../nodemanager/LinuxContainerExecutor.java     |  4 ++--
 .../linux/privileged/PrivilegedOperation.java   | 23 +++++++++++++++++---
 .../privileged/PrivilegedOperationExecutor.java | 21 ++++++++++--------
 .../linux/resources/CGroupsHandlerImpl.java     |  2 +-
 .../linux/resources/TrafficController.java      |  2 +-
 .../runtime/DefaultLinuxContainerRuntime.java   | 11 ++++++----
 .../runtime/DockerLinuxContainerRuntime.java    |  5 ++---
 .../TestPrivilegedOperationExecutor.java        |  4 ++--
 .../linux/resources/TestCGroupsHandlerImpl.java |  2 +-
 9 files changed, 48 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/059caf99/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index 1367b13..5a48e09 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -181,7 +181,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
     // verify configuration/permissions and exit
     try {
       PrivilegedOperation checkSetupOp = new PrivilegedOperation(
-          PrivilegedOperation.OperationType.CHECK_SETUP, (String) null);
+          PrivilegedOperation.OperationType.CHECK_SETUP);
       PrivilegedOperationExecutor privilegedOperationExecutor =
           PrivilegedOperationExecutor.getInstance(conf);
 
@@ -242,7 +242,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
     verifyUsernamePattern(user);
     String runAsUser = getRunAsUser(user);
     PrivilegedOperation initializeContainerOp = new PrivilegedOperation(
-        PrivilegedOperation.OperationType.INITIALIZE_CONTAINER, (String) null);
+        PrivilegedOperation.OperationType.INITIALIZE_CONTAINER);
     List<String> prefixCommands = new ArrayList<>();
 
     addSchedPriorityCommand(prefixCommands);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/059caf99/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java
index cbbf7a8..259dee8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java
@@ -68,10 +68,16 @@ public class PrivilegedOperation {
 
   private final OperationType opType;
   private final List<String> args;
+  private boolean failureLogging;
 
-  public PrivilegedOperation(OperationType opType, String arg) {
+  public PrivilegedOperation(OperationType opType) {
     this.opType = opType;
     this.args = new ArrayList<String>();
+    this.failureLogging = true;
+  }
+
+  public PrivilegedOperation(OperationType opType, String arg) {
+    this(opType);
 
     if (arg != null) {
       this.args.add(arg);
@@ -79,8 +85,7 @@ public class PrivilegedOperation {
   }
 
   public PrivilegedOperation(OperationType opType, List<String> args) {
-    this.opType = opType;
-    this.args = new ArrayList<String>();
+    this(opType);
 
     if (args != null) {
       this.args.addAll(args);
@@ -97,6 +102,18 @@ public class PrivilegedOperation {
     this.args.addAll(args);
   }
 
+  public void enableFailureLogging() {
+    this.failureLogging = true;
+  }
+
+  public void disableFailureLogging() {
+    this.failureLogging = false;
+  }
+
+  public boolean isFailureLoggingEnabled() {
+    return failureLogging;
+  }
+
   public OperationType getOperationType() {
     return opType;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/059caf99/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java
index 4b1bb9f..7370daa 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java
@@ -155,17 +155,20 @@ public class PrivilegedOperationExecutor {
         LOG.debug(exec.getOutput());
       }
     } catch (ExitCodeException e) {
-      StringBuilder logBuilder = new StringBuilder("Shell execution returned "
-          + "exit code: ")
-          .append(exec.getExitCode())
-          .append(". Privileged Execution Operation Output: ")
-          .append(System.lineSeparator()).append(exec.getOutput());
+      if (operation.isFailureLoggingEnabled()) {
 
-      logBuilder.append("Full command array for failed execution: ")
-          .append(System.lineSeparator());
-      logBuilder.append(Arrays.toString(fullCommandArray));
+        StringBuilder logBuilder = new StringBuilder("Shell execution returned "
+            + "exit code: ")
+            .append(exec.getExitCode())
+            .append(". Privileged Execution Operation Output: ")
+            .append(System.lineSeparator()).append(exec.getOutput());
 
-      LOG.warn(logBuilder.toString());
+        logBuilder.append("Full command array for failed execution: ")
+            .append(System.lineSeparator());
+        logBuilder.append(Arrays.toString(fullCommandArray));
+
+        LOG.warn(logBuilder.toString());
+      }
 
       //stderr from shell executor seems to be stuffed into the exception
       //'message' - so, we have to extract it and set it as the error out

http://git-wip-us.apache.org/repos/asf/hadoop/blob/059caf99/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
index 1ee0f8a..36bd468 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
@@ -247,7 +247,7 @@ class CGroupsHandlerImpl implements CGroupsHandler {
             .append(controller.getName()).append('=').append(controllerPath);
         PrivilegedOperation.OperationType opType = PrivilegedOperation
             .OperationType.MOUNT_CGROUPS;
-        PrivilegedOperation op = new PrivilegedOperation(opType, (String) null);
+        PrivilegedOperation op = new PrivilegedOperation(opType);
 
         op.appendArgs(hierarchy, cGroupKV.toString());
         LOG.info("Mounting controller " + controller.getName() + " at " +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/059caf99/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficController.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficController.java
index e33cea4..f1468fb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficController.java
@@ -546,7 +546,7 @@ import java.util.regex.Pattern;
       case TC_MODIFY_STATE:
       case TC_READ_STATE:
       case TC_READ_STATS:
-        operation = new PrivilegedOperation(opType, (String) null);
+        operation = new PrivilegedOperation(opType);
         commands = new ArrayList<>();
         break;
       default:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/059caf99/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
index 633fa66..3862b92 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
@@ -67,7 +67,7 @@ public class DefaultLinuxContainerRuntime implements LinuxContainerRuntime {
       throws ContainerExecutionException {
     Container container = ctx.getContainer();
     PrivilegedOperation launchOp = new PrivilegedOperation(
-        PrivilegedOperation.OperationType.LAUNCH_CONTAINER, (String) null);
+        PrivilegedOperation.OperationType.LAUNCH_CONTAINER);
 
     //All of these arguments are expected to be available in the runtime context
     launchOp.appendArgs(ctx.getExecutionAttribute(RUN_AS_USER),
@@ -116,7 +116,7 @@ public class DefaultLinuxContainerRuntime implements LinuxContainerRuntime {
       throws ContainerExecutionException {
     Container container = ctx.getContainer();
     PrivilegedOperation signalOp = new PrivilegedOperation(
-        PrivilegedOperation.OperationType.SIGNAL_CONTAINER, (String) null);
+        PrivilegedOperation.OperationType.SIGNAL_CONTAINER);
 
     signalOp.appendArgs(ctx.getExecutionAttribute(RUN_AS_USER),
         ctx.getExecutionAttribute(USER),
@@ -125,6 +125,9 @@ public class DefaultLinuxContainerRuntime implements LinuxContainerRuntime {
         ctx.getExecutionAttribute(PID),
         Integer.toString(ctx.getExecutionAttribute(SIGNAL).getValue()));
 
+    //Some failures here are acceptable. Let the calling executor decide.
+    signalOp.disableFailureLogging();
+
     try {
       PrivilegedOperationExecutor executor = PrivilegedOperationExecutor
           .getInstance(conf);
@@ -133,8 +136,8 @@ public class DefaultLinuxContainerRuntime implements LinuxContainerRuntime {
           signalOp, null, container.getLaunchContext().getEnvironment(),
           false);
     } catch (PrivilegedOperationException e) {
-      LOG.warn("Signal container failed. Exception: ", e);
-
+      //Don't log the failure here. Some kinds of signaling failures are
+      // acceptable. Let the calling executor decide what to do.
       throw new ContainerExecutionException("Signal container failed", e
           .getExitCode(), e.getOutput(), e.getErrorOutput());
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/059caf99/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 2dee663..2b4fc79 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -280,8 +280,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
     String commandFile = dockerClient.writeCommandToTempFile(runCommand,
         containerIdStr);
     PrivilegedOperation launchOp = new PrivilegedOperation(
-        PrivilegedOperation.OperationType.LAUNCH_DOCKER_CONTAINER, (String)
-        null);
+        PrivilegedOperation.OperationType.LAUNCH_DOCKER_CONTAINER);
 
     launchOp.appendArgs(runAsUser, ctx.getExecutionAttribute(USER),
         Integer.toString(PrivilegedOperation
@@ -321,7 +320,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
       throws ContainerExecutionException {
     Container container = ctx.getContainer();
     PrivilegedOperation signalOp = new PrivilegedOperation(
-        PrivilegedOperation.OperationType.SIGNAL_CONTAINER, (String) null);
+        PrivilegedOperation.OperationType.SIGNAL_CONTAINER);
 
     signalOp.appendArgs(ctx.getExecutionAttribute(RUN_AS_USER),
         ctx.getExecutionAttribute(USER),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/059caf99/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/TestPrivilegedOperationExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/TestPrivilegedOperationExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/TestPrivilegedOperationExecutor.java
index 849dbab..7146412 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/TestPrivilegedOperationExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/TestPrivilegedOperationExecutor.java
@@ -69,7 +69,7 @@ public class TestPrivilegedOperationExecutor {
     cGroupTasks2 = "net_cls/hadoop_yarn/container_01/tasks";
     cGroupTasks3 = "blkio/hadoop_yarn/container_01/tasks";
     opDisallowed = new PrivilegedOperation
-        (PrivilegedOperation.OperationType.DELETE_AS_USER, (String) null);
+        (PrivilegedOperation.OperationType.DELETE_AS_USER);
     opTasksNone = new PrivilegedOperation
         (PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP,
             PrivilegedOperation.CGROUP_ARG_PREFIX + cGroupTasksNone);
@@ -118,7 +118,7 @@ public class TestPrivilegedOperationExecutor {
     PrivilegedOperationExecutor exec = PrivilegedOperationExecutor
         .getInstance(confWithExecutorPath);
     PrivilegedOperation op = new PrivilegedOperation(PrivilegedOperation
-        .OperationType.TC_MODIFY_STATE, (String) null);
+        .OperationType.TC_MODIFY_STATE);
     String[] cmdArray = exec.getPrivilegedOperationExecutionCommand(null, op);
 
     //No arguments added - so the resulting array should consist of

http://git-wip-us.apache.org/repos/asf/hadoop/blob/059caf99/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java
index 50f8da6..76d56b4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java
@@ -89,7 +89,7 @@ public class TestCGroupsHandlerImpl {
       cGroupsHandler = new CGroupsHandlerImpl(conf,
           privilegedOperationExecutorMock);
       PrivilegedOperation expectedOp = new PrivilegedOperation(
-          PrivilegedOperation.OperationType.MOUNT_CGROUPS, (String) null);
+          PrivilegedOperation.OperationType.MOUNT_CGROUPS);
       //This is expected to be of the form :
       //net_cls=<mount_path>/net_cls
       StringBuffer controllerKV = new StringBuffer(controller.getName())