Posted to common-commits@hadoop.apache.org by ar...@apache.org on 2015/07/15 23:33:01 UTC

[1/6] hadoop git commit: MAPREDUCE-6427. Fix typo in JobHistoryEventHandler. Contributed by Ray Chiang

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 6da5a33bb -> b0e5a349f


MAPREDUCE-6427. Fix typo in JobHistoryEventHandler. Contributed by Ray Chiang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4ccdb11
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4ccdb11
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4ccdb11

Branch: refs/heads/HDFS-7240
Commit: f4ccdb11dca17db139a3746584e321d884651d01
Parents: 979c9ca
Author: Chris Douglas <cd...@apache.org>
Authored: Tue Jul 14 14:51:06 2015 -0700
Committer: Chris Douglas <cd...@apache.org>
Committed: Tue Jul 14 14:55:43 2015 -0700

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt                               | 2 ++
 .../apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4ccdb11/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 95eec1c..31f4eaa 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -537,6 +537,8 @@ Release 2.8.0 - UNRELEASED
     RMContainerAllocator.reduceNodeLabelExpression.
     (Brahma Reddy Battula via aajisaka)
 
+    MAPREDUCE-6427. Fix typo in JobHistoryEventHandler. (Ray Chiang via cdouglas)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4ccdb11/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index 0457cc5..b0bcfcd 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -748,7 +748,7 @@ public class JobHistoryEventHandler extends AbstractService
         tEvent.addEventInfo("JOB_CONF_PATH", jse.getJobConfPath());
         tEvent.addEventInfo("ACLS", jse.getJobAcls());
         tEvent.addEventInfo("JOB_QUEUE_NAME", jse.getJobQueueName());
-        tEvent.addEventInfo("WORKLFOW_ID", jse.getWorkflowId());
+        tEvent.addEventInfo("WORKFLOW_ID", jse.getWorkflowId());
         tEvent.addEventInfo("WORKFLOW_NAME", jse.getWorkflowName());
         tEvent.addEventInfo("WORKFLOW_NAME_NAME", jse.getWorkflowNodeName());
         tEvent.addEventInfo("WORKFLOW_ADJACENCIES",


[5/6] hadoop git commit: HDFS-8778. TestBlockReportRateLimiting#testLeaseExpiration can deadlock. (Contributed by Arpit Agarwal)

Posted by ar...@apache.org.
HDFS-8778. TestBlockReportRateLimiting#testLeaseExpiration can deadlock. (Contributed by Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3ec0a044
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3ec0a044
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3ec0a044

Branch: refs/heads/HDFS-7240
Commit: 3ec0a0444f75c8743289ec7c8645d4bdf51fc45a
Parents: edcaae4
Author: Arpit Agarwal <ar...@apache.org>
Authored: Wed Jul 15 14:08:58 2015 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Wed Jul 15 14:08:58 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  5 +-
 .../TestBlockReportRateLimiting.java            | 64 ++++++--------------
 2 files changed, 23 insertions(+), 46 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ec0a044/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 20bdef0..8f6dd41 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1044,6 +1044,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-7608: hdfs dfsclient newConnectedPeer has no write timeout (Xiaoyu Yao
     via Colin P. McCabe)
 
+    HDFS-8778. TestBlockReportRateLimiting#testLeaseExpiration can deadlock.
+    (Arpit Agarwal)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -1059,7 +1062,7 @@ Release 2.7.2 - UNRELEASED
   HDFS-8722. Optimize datanode writes for small writes and flushes (kihwal)
 
   BUG FIXES
-
+    
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ec0a044/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java
index fc5f9e7..86a7511 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java
@@ -24,7 +24,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_FULL_BLOCK_REPOR
 import com.google.common.base.Joiner;
 import com.google.common.base.Supplier;
 import com.google.common.util.concurrent.Uninterruptibles;
-import org.apache.commons.lang.mutable.MutableObject;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -42,8 +41,6 @@ import org.junit.Test;
 import java.io.IOException;
 import java.util.HashSet;
 import java.util.List;
-import java.util.concurrent.ArrayBlockingQueue;
-import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
@@ -174,13 +171,11 @@ public class TestBlockReportRateLimiting {
     conf.setLong(DFS_NAMENODE_FULL_BLOCK_REPORT_LEASE_LENGTH_MS, 100L);
 
     final Semaphore gotFbrSem = new Semaphore(0);
-    final AtomicReference<String> failure = new AtomicReference<String>("");
+    final AtomicReference<String> failure = new AtomicReference<>();
     final AtomicReference<MiniDFSCluster> cluster =
-        new AtomicReference<>(null);
-    final BlockingQueue<Integer> datanodeToStop =
-        new ArrayBlockingQueue<Integer>(1);
+        new AtomicReference<>();
+    final AtomicReference<String> datanodeToStop = new AtomicReference<>();
     final BlockManagerFaultInjector injector = new BlockManagerFaultInjector() {
-      private String uuidToStop = "";
 
       @Override
       public void incomingBlockReportRpc(DatanodeID nodeID,
@@ -189,11 +184,9 @@ public class TestBlockReportRateLimiting {
           setFailure(failure, "Got unexpected rate-limiting-" +
               "bypassing full block report RPC from " + nodeID);
         }
-        synchronized (this) {
-          if (uuidToStop.equals(nodeID.getDatanodeUuid())) {
-            throw new IOException("Injecting failure into block " +
-                "report RPC for " + nodeID);
-          }
+        if (nodeID.getXferAddr().equals(datanodeToStop.get())) {
+          throw new IOException("Injecting failure into block " +
+              "report RPC for " + nodeID);
         }
         gotFbrSem.release();
       }
@@ -204,43 +197,24 @@ public class TestBlockReportRateLimiting {
         if (leaseId == 0) {
           return;
         }
-        synchronized (this) {
-          if (uuidToStop.isEmpty()) {
-            MiniDFSCluster cl;
-            do {
-              cl = cluster.get();
-            } while (cl == null);
-            int datanodeIndexToStop = getDatanodeIndex(cl, node);
-            uuidToStop = node.getDatanodeUuid();
-            datanodeToStop.add(Integer.valueOf(datanodeIndexToStop));
-          }
-        }
-      }
-
-      private int getDatanodeIndex(MiniDFSCluster cl,
-                                   DatanodeDescriptor node) {
-        List<DataNode> datanodes = cl.getDataNodes();
-        for (int i = 0; i < datanodes.size(); i++) {
-          DataNode datanode = datanodes.get(i);
-          if (datanode.getDatanodeUuid().equals(node.getDatanodeUuid())) {
-            return i;
-          }
-        }
-        throw new RuntimeException("Failed to find UUID " +
-            node.getDatanodeUuid() + " in the list of datanodes.");
+        datanodeToStop.compareAndSet(null, node.getXferAddr());
       }
 
       @Override
       public void removeBlockReportLease(DatanodeDescriptor node, long leaseId) {
       }
     };
-    BlockManagerFaultInjector.instance = injector;
-    cluster.set(new MiniDFSCluster.Builder(conf).numDataNodes(2).build());
-    cluster.get().waitActive();
-    int datanodeIndexToStop = datanodeToStop.take();
-    cluster.get().stopDataNode(datanodeIndexToStop);
-    gotFbrSem.acquire();
-    cluster.get().shutdown();
-    Assert.assertEquals("", failure.get());
+    try {
+      BlockManagerFaultInjector.instance = injector;
+      cluster.set(new MiniDFSCluster.Builder(conf).numDataNodes(2).build());
+      cluster.get().waitActive();
+      Assert.assertNotNull(cluster.get().stopDataNode(datanodeToStop.get()));
+      gotFbrSem.acquire();
+      Assert.assertNull(failure.get());
+    } finally {
+      if (cluster.get() != null) {
+        cluster.get().shutdown();
+      }
+    }
   }
 }
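
The patch replaces the synchronized uuidToStop field and the BlockingQueue handoff with a single AtomicReference, and stops the datanode by its transfer address instead of its index. The core of the change is the lock-free "first writer wins" idiom via compareAndSet(null, value); a standalone sketch of that idiom (hypothetical addresses, no MiniDFSCluster) is below:

    import java.util.concurrent.atomic.AtomicReference;

    public class FirstWinsSketch {
      private static final AtomicReference<String> datanodeToStop =
          new AtomicReference<>();

      static void onLeaseRequest(String xferAddr) {
        // compareAndSet(null, value) succeeds only for the first caller,
        // so later callbacks cannot overwrite the chosen datanode.
        datanodeToStop.compareAndSet(null, xferAddr);
      }

      public static void main(String[] args) {
        onLeaseRequest("127.0.0.1:50010");
        onLeaseRequest("127.0.0.1:50011");
        System.out.println(datanodeToStop.get()); // prints 127.0.0.1:50010
      }
    }

Wrapping the cluster lifecycle in try/finally, as the patch also does, keeps a failed assertion from leaking the MiniDFSCluster and deadlocking later tests.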


[4/6] hadoop git commit: YARN-3170. YARN architecture document needs updating. Contributed by Brahma Reddy Battula.

Posted by ar...@apache.org.
YARN-3170. YARN architecture document needs updating. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/edcaae44
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/edcaae44
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/edcaae44

Branch: refs/heads/HDFS-7240
Commit: edcaae44c10b7e88e68fa97afd32e4da4a9d8df7
Parents: cec1d43
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Wed Jul 15 15:42:41 2015 +0900
Committer: Tsuyoshi Ozawa <oz...@apache.org>
Committed: Wed Jul 15 15:42:41 2015 +0900

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 +++
 .../hadoop-yarn-site/src/site/markdown/YARN.md  | 22 +++++++-------------
 2 files changed, 10 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/edcaae44/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 780c667..0a6f871 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -639,6 +639,9 @@ Release 2.7.2 - UNRELEASED
 
   IMPROVEMENTS
 
+    YARN-3170. YARN architecture document needs updating. (Brahma Reddy Battula
+    via ozawa)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/edcaae44/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YARN.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YARN.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YARN.md
index f79272c..f8e8154 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YARN.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YARN.md
@@ -12,14 +12,12 @@
   limitations under the License. See accompanying LICENSE file.
 -->
 
-Apache Hadoop NextGen MapReduce (YARN)
+Apache Hadoop YARN
 ==================
 
-MapReduce has undergone a complete overhaul in hadoop-0.23 and we now have, what we call, MapReduce 2.0 (MRv2) or YARN.
+The fundamental idea of YARN is to split up the functionalities of resource management and job scheduling/monitoring into separate daemons. The idea is to have a global ResourceManager (*RM*) and per-application ApplicationMaster (*AM*). An application is either a single job or a DAG of jobs.
 
-The fundamental idea of MRv2 is to split up the two major functionalities of the JobTracker, resource management and job scheduling/monitoring, into separate daemons. The idea is to have a global ResourceManager (*RM*) and per-application ApplicationMaster (*AM*). An application is either a single job in the classical sense of Map-Reduce jobs or a DAG of jobs.
-
-The ResourceManager and per-node slave, the NodeManager (*NM*), form the data-computation framework. The ResourceManager is the ultimate authority that arbitrates resources among all the applications in the system.
+The ResourceManager and the NodeManager form the data-computation framework. The ResourceManager is the ultimate authority that arbitrates resources among all the applications in the system. The NodeManager is the per-machine framework agent who is responsible for containers, monitoring their resource usage (cpu, memory, disk, network) and reporting the same to the ResourceManager/Scheduler.
 
 The per-application ApplicationMaster is, in effect, a framework specific library and is tasked with negotiating resources from the ResourceManager and working with the NodeManager(s) to execute and monitor the tasks.
 
@@ -27,16 +25,10 @@ The per-application ApplicationMaster is, in effect, a framework specific librar
 
 The ResourceManager has two main components: Scheduler and ApplicationsManager.
 
-The Scheduler is responsible for allocating resources to the various running applications subject to familiar constraints of capacities, queues etc. The Scheduler is pure scheduler in the sense that it performs no monitoring or tracking of status for the application. Also, it offers no guarantees about restarting failed tasks either due to application failure or hardware failures. The Scheduler performs its scheduling function based the resource requirements of the applications; it does so based on the abstract notion of a resource *Container* which incorporates elements such as memory, cpu, disk, network etc. In the first version, only `memory` is supported.
-
-The Scheduler has a pluggable policy plug-in, which is responsible for partitioning the cluster resources among the various queues, applications etc. The current Map-Reduce schedulers such as the CapacityScheduler and the FairScheduler would be some examples of the plug-in.
-
-The CapacityScheduler supports `hierarchical queues` to allow for more predictable sharing of cluster resources
-
-The ApplicationsManager is responsible for accepting job-submissions, negotiating the first container for executing the application specific ApplicationMaster and provides the service for restarting the ApplicationMaster container on failure.
+The Scheduler is responsible for allocating resources to the various running applications subject to familiar constraints of capacities, queues etc. The Scheduler is pure scheduler in the sense that it performs no monitoring or tracking of status for the application. Also, it offers no guarantees about restarting failed tasks either due to application failure or hardware failures. The Scheduler performs its scheduling function based the resource requirements of the applications; it does so based on the abstract notion of a resource *Container* which incorporates elements such as memory, cpu, disk, network etc.
 
-The NodeManager is the per-machine framework agent who is responsible for containers, monitoring their resource usage (cpu, memory, disk, network) and reporting the same to the ResourceManager/Scheduler.
+The Scheduler has a pluggable policy which is responsible for partitioning the cluster resources among the various queues, applications etc. The current schedulers such as the [CapacityScheduler](http://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/CapacityScheduler.html) and the [FairScheduler](http://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/FairScheduler.html) would be some examples of plug-ins.
 
-The per-application ApplicationMaster has the responsibility of negotiating appropriate resource containers from the Scheduler, tracking their status and monitoring for progress.
+The ApplicationsManager is responsible for accepting job-submissions, negotiating the first container for executing the application specific ApplicationMaster and provides the service for restarting the ApplicationMaster container on failure. The per-application ApplicationMaster has the responsibility of negotiating appropriate resource containers from the Scheduler, tracking their status and monitoring for progress.
 
-MRV2 maintains **API compatibility** with previous stable release (hadoop-1.x). This means that all Map-Reduce jobs should still run unchanged on top of MRv2 with just a recompile.
+MapReduce in hadoop-2.x maintains **API compatibility** with previous stable release (hadoop-1.x). This means that all MapReduce jobs should still run unchanged on top of YARN with just a recompile.
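
The rewritten page describes the split the public client API exposes: a client asks the ResourceManager's ApplicationsManager for a new application, describes the ApplicationMaster container, and submits it; the Scheduler then allocates that first container, after which the AM negotiates further resources itself. A condensed, illustrative client-side sketch using the YarnClient API follows (application name, shell command, and resource sizes are made up for the example):

    import java.util.Collections;
    import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
    import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.client.api.YarnClient;
    import org.apache.hadoop.yarn.client.api.YarnClientApplication;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.util.Records;

    public class SubmitSketch {
      public static void main(String[] args) throws Exception {
        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(new YarnConfiguration());
        yarnClient.start();

        // The ApplicationsManager (in the RM) hands out a new application id.
        YarnClientApplication app = yarnClient.createApplication();
        ApplicationSubmissionContext ctx = app.getApplicationSubmissionContext();
        ctx.setApplicationName("sketch");

        // Describe the container that will run the ApplicationMaster.
        ContainerLaunchContext amContainer =
            Records.newRecord(ContainerLaunchContext.class);
        amContainer.setCommands(
            Collections.singletonList("/bin/echo hello-from-am"));
        ctx.setAMContainerSpec(amContainer);
        ctx.setResource(Resource.newInstance(512, 1)); // 512 MB, 1 vcore
        ctx.setQueue("default");

        // The Scheduler allocates this first container; the AM then
        // negotiates any additional containers on its own.
        yarnClient.submitApplication(ctx);
        yarnClient.stop();
      }
    }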


[3/6] hadoop git commit: HADOOP-12153. ByteBufferReadable doesn't declare @InterfaceAudience and @InterfaceStability. Contributed by Brahma Reddy Battula.

Posted by ar...@apache.org.
HADOOP-12153. ByteBufferReadable doesn't declare @InterfaceAudience and @InterfaceStability. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cec1d43d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cec1d43d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cec1d43d

Branch: refs/heads/HDFS-7240
Commit: cec1d43db026e66a9e84b5c3e8476dfd33f17ecb
Parents: 0a16ee6
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Wed Jul 15 14:18:12 2015 +0900
Committer: Tsuyoshi Ozawa <oz...@apache.org>
Committed: Wed Jul 15 14:19:17 2015 +0900

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                  | 3 +++
 .../src/main/java/org/apache/hadoop/fs/ByteBufferReadable.java   | 4 ++++
 2 files changed, 7 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cec1d43d/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index a807d12..3d64156 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -697,6 +697,9 @@ Release 2.8.0 - UNRELEASED
 
     HADOOP-12211. Collect disk usage on the node (Robert Grandl via cdouglas)
 
+    HADOOP-12153. ByteBufferReadable doesn't declare @InterfaceAudience and
+    @InterfaceStability. (Brahma Reddy Battula via ozawa)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cec1d43d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferReadable.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferReadable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferReadable.java
index aa6e85e..20f7224 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferReadable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferReadable.java
@@ -19,11 +19,15 @@ package org.apache.hadoop.fs;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 
 /**
  * Implementers of this interface provide a read API that writes to a
  * ByteBuffer, not a byte[].
  */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
 public interface ByteBufferReadable {
   /**
    * Reads up to buf.remaining() bytes into buf. Callers should use
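
With the interface now annotated @InterfaceAudience.Public and @InterfaceStability.Evolving, downstream code can rely on it directly. A short sketch of the consuming side: read into a ByteBuffer when the underlying stream supports it, and fall back to the byte[] path otherwise (the file path below is illustrative):

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.ByteBufferReadable;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ByteBufferReadSketch {
      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        try (FSDataInputStream in = fs.open(new Path("/tmp/example"))) {
          ByteBuffer buf = ByteBuffer.allocate(4096);
          if (in.getWrappedStream() instanceof ByteBufferReadable) {
            // ByteBufferReadable read path: writes into buf directly.
            int n = in.read(buf);
            System.out.println("read " + n + " bytes into the buffer");
          } else {
            // Fallback for streams that do not implement the interface.
            byte[] arr = new byte[4096];
            int n = in.read(arr);
            System.out.println("read " + n + " bytes into the array");
          }
        }
      }
    }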


[2/6] hadoop git commit: HADOOP-12232. Upgrade Tomcat dependency to 6.0.44. Contributed by Chris Nauroth.

Posted by ar...@apache.org.
HADOOP-12232. Upgrade Tomcat dependency to 6.0.44. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0a16ee60
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0a16ee60
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0a16ee60

Branch: refs/heads/HDFS-7240
Commit: 0a16ee60174b15e3df653bb107cb2d0c2d606330
Parents: f4ccdb1
Author: cnauroth <cn...@apache.org>
Authored: Tue Jul 14 14:53:08 2015 -0700
Committer: cnauroth <cn...@apache.org>
Committed: Tue Jul 14 15:22:33 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
 hadoop-project/pom.xml                          | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a16ee60/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index a9bd7de..a807d12 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -966,6 +966,8 @@ Release 2.7.2 - UNRELEASED
 
   IMPROVEMENTS
 
+    HADOOP-12232. Upgrade Tomcat dependency to 6.0.44. (cnauroth)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a16ee60/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index e010de1..d563420 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -76,7 +76,7 @@
     <curator.version>2.7.1</curator.version>
     <findbugs.version>3.0.0</findbugs.version>
 
-    <tomcat.version>6.0.41</tomcat.version>
+    <tomcat.version>6.0.44</tomcat.version>
 
     <!-- define the Java language version used by the compiler -->
     <javac.version>1.7</javac.version>


[6/6] hadoop git commit: Merge branch 'trunk' into HDFS-7240

Posted by ar...@apache.org.
Merge branch 'trunk' into HDFS-7240


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0e5a349
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0e5a349
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0e5a349

Branch: refs/heads/HDFS-7240
Commit: b0e5a349f35f452b86b73faed69bebfe038817e5
Parents: 6da5a33 3ec0a04
Author: Arpit Agarwal <ar...@apache.org>
Authored: Wed Jul 15 14:28:48 2015 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Wed Jul 15 14:28:48 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  5 ++
 .../apache/hadoop/fs/ByteBufferReadable.java    |  4 ++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  5 +-
 .../TestBlockReportRateLimiting.java            | 64 ++++++--------------
 hadoop-mapreduce-project/CHANGES.txt            |  2 +
 .../jobhistory/JobHistoryEventHandler.java      |  2 +-
 hadoop-project/pom.xml                          |  2 +-
 hadoop-yarn-project/CHANGES.txt                 |  3 +
 .../hadoop-yarn-site/src/site/markdown/YARN.md  | 22 +++----
 9 files changed, 46 insertions(+), 63 deletions(-)
----------------------------------------------------------------------