Posted to common-commits@hadoop.apache.org by su...@apache.org on 2012/10/22 22:43:25 UTC

svn commit: r1401071 [1/4] - in /hadoop/common/branches/branch-trunk-win/hadoop-common-project: hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/ hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/ hado...

Author: suresh
Date: Mon Oct 22 20:43:16 2012
New Revision: 1401071

URL: http://svn.apache.org/viewvc?rev=1401071&view=rev
Log:
Merging trunk changes to branch-trunk-win.

Removed:
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/packages/
Modified:
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/CHANGES.txt   (contents, props changed)
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/CMakeLists.txt
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/docs/   (props changed)
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/   (props changed)
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobExpander.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ContextFactory.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsContext.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsException.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsRecord.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/file/FileContext.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/AbstractMetricsContext.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/MetricsRecordImpl.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsCollector.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsFilter.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsInfo.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsRecord.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsRecordBuilder.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSink.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSource.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSystem.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/annotation/Metric.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsRecordBuilderImpl.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsRecordImpl.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystem.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsSourceBuilder.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/Quantile.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleQuantiles.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMapping.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PureJavaCrc32.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PureJavaCrc32C.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/test/core/   (props changed)
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestVLong.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/jmx/TestJMXJsonServlet.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/util/TestSampleQuantiles.java
    hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestPureJavaCrc32.java

Modified: hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java?rev=1401071&r1=1401070&r2=1401071&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java Mon Oct 22 20:43:16 2012
@@ -158,7 +158,7 @@ public class KerberosAuthenticator imple
       conn.setRequestMethod(AUTH_HTTP_METHOD);
       conn.connect();
       
-      if (conn.getResponseCode() == HttpURLConnection.HTTP_OK) {
+      if (conn.getRequestProperty(AUTHORIZATION) != null && conn.getResponseCode() == HttpURLConnection.HTTP_OK) {
         LOG.debug("JDK performed authentication on our behalf.");
         // If the JDK already did the SPNEGO back-and-forth for
         // us, just pull out the token.
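
The hunk above corresponds to HADOOP-8883 in the CHANGES.txt hunk further down (anonymous fallback in KerberosAuthenticator): an HTTP 200 alone no longer counts as proof that the JDK's HTTP stack already completed SPNEGO; an Authorization request header must also be present, otherwise the authenticator falls back to its own negotiation. A minimal sketch of just that guard, using only the calls visible in the diff (the enclosing class and the fallback path are omitted):

    import java.io.IOException;
    import java.net.HttpURLConnection;

    public class SpnegoGuardSketch {
      static final String AUTHORIZATION = "Authorization";

      // Treat the response as JDK-performed SPNEGO only if an Authorization
      // header was actually sent AND the server answered 200 OK; a bare 200
      // may simply mean the endpoint required no authentication at all.
      static boolean jdkPerformedAuth(HttpURLConnection conn) throws IOException {
        return conn.getRequestProperty(AUTHORIZATION) != null
            && conn.getResponseCode() == HttpURLConnection.HTTP_OK;
      }
    }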

Modified: hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java?rev=1401071&r1=1401070&r2=1401071&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java Mon Oct 22 20:43:16 2012
@@ -134,9 +134,11 @@ public abstract class AuthenticatorTestC
     try {
       URL url = new URL(getBaseURL());
       AuthenticatedURL.Token token = new AuthenticatedURL.Token();
+      Assert.assertFalse(token.isSet());
       TestConnectionConfigurator connConf = new TestConnectionConfigurator();
       AuthenticatedURL aUrl = new AuthenticatedURL(authenticator, connConf);
       HttpURLConnection conn = aUrl.openConnection(url, token);
+      Assert.assertTrue(token.isSet());
       Assert.assertTrue(connConf.invoked);
       String tokenStr = token.toString();
       if (doPost) {
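
The two added assertions pin down a token lifecycle contract: a freshly constructed AuthenticatedURL.Token is unset, and a successful openConnection populates it so it can be reused on later requests. A self-contained sketch of that contract, assuming a reachable endpoint behind the auth filter is passed in (class names as used in the test above):

    import java.net.HttpURLConnection;
    import java.net.URL;
    import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
    import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;

    public class TokenLifecycleSketch {
      public static void main(String[] args) throws Exception {
        URL url = new URL(args[0]);  // assumed: an endpoint behind the auth filter
        AuthenticatedURL.Token token = new AuthenticatedURL.Token();
        if (token.isSet()) {
          throw new AssertionError("a new token should carry no credentials");
        }
        HttpURLConnection conn =
            new AuthenticatedURL(new KerberosAuthenticator()).openConnection(url, token);
        // After a successful openConnection the token must be populated.
        if (!token.isSet()) {
          throw new AssertionError("openConnection should have set the token");
        }
        System.out.println("HTTP " + conn.getResponseCode());
      }
    }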

Modified: hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java?rev=1401071&r1=1401070&r2=1401071&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java Mon Oct 22 20:43:16 2012
@@ -43,6 +43,14 @@ public class TestKerberosAuthenticator e
     _testAuthentication(new KerberosAuthenticator(), false);
   }
 
+  public void testFallbacktoPseudoAuthenticatorAnonymous() throws Exception {
+    Properties props = new Properties();
+    props.setProperty(AuthenticationFilter.AUTH_TYPE, "simple");
+    props.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "true");
+    setAuthenticationHandlerConfig(props);
+    _testAuthentication(new KerberosAuthenticator(), false);
+  }
+
   public void testNotAuthenticated() throws Exception {
     setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration());
     start();
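
The new testFallbacktoPseudoAuthenticatorAnonymous drives the fallback path end to end: the server side is configured for "simple" (pseudo) authentication with anonymous access allowed, and the KerberosAuthenticator is still expected to succeed by falling back rather than failing. A sketch of just that configuration step, using the constants referenced in the test (server startup and the _testAuthentication harness are omitted):

    import java.util.Properties;
    import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
    import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;

    public class AnonymousFallbackConfigSketch {
      static Properties anonymousSimpleAuthConfig() {
        Properties props = new Properties();
        // "simple" selects the pseudo handler instead of Kerberos/SPNEGO.
        props.setProperty(AuthenticationFilter.AUTH_TYPE, "simple");
        // Accept requests that present no user name at all.
        props.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "true");
        return props;
      }
    }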

Modified: hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1401071&r1=1401070&r2=1401071&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/CHANGES.txt (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/CHANGES.txt Mon Oct 22 20:43:16 2012
@@ -72,8 +72,8 @@ Trunk (Unreleased)
     HADOOP-8360. empty-configuration.xml fails xml validation
     (Radim Kolar via harsh)
 
-    HADOOP-8367 Improve documentation of declaringClassProtocolName in rpc headers 
-                (Sanjay Radia)
+    HADOOP-8367 Improve documentation of declaringClassProtocolName in 
+    rpc headers. (Sanjay Radia)
 
     HADOOP-8415. Add getDouble() and setDouble() in
     org.apache.hadoop.conf.Configuration (Jan van der Lugt via harsh)
@@ -122,6 +122,13 @@ Trunk (Unreleased)
     HADOOP-8864. Addendum to HADOOP-8840: Add a coloring case for +0 results
     too. (harsh)
 
+    HADOOP-8910. Add examples to GlobExpander#expand method. (suresh)
+
+    HADOOP-8920. Add more javadoc to metrics2 related classes. (suresh)
+
+    HADOOP-8776. Provide an option in test-patch that can enable/disable
+    compiling native code. (Chris Nauroth via suresh)
+
   BUG FIXES
 
     HADOOP-8177. MBeans shouldn't try to register when it fails to create MBeanName.
@@ -259,6 +266,9 @@ Trunk (Unreleased)
     HADOOP-8839. test-patch's -1 on @author tag presence doesn't cause
     a -1 to the overall result (harsh)
 
+    HADOOP-8918. test-patch.sh is parsing modified files wrong.
+    (Raja Aluri via suresh)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -303,11 +313,31 @@ Release 2.0.3-alpha - Unreleased 
     HADOOP-8909. Hadoop Common Maven protoc calls must not depend on external
     sh script. (Chris Nauroth via suresh)
 
+    HADOOP-8911. CRLF characters in source and text files.
+    (Raja Aluri via suresh)
+
+    HADOOP-8912. Add .gitattributes file to prevent CRLF and LF mismatches
+    for source and text files. (Raja Aluri via suresh)
+
+    HADOOP-8784. Improve IPC.Client's token use (daryn)
+
+    HADOOP-8929. Add toString, other improvements for SampleQuantiles (todd)
+
+    HADOOP-8922. Provide alternate JSONP output for JMXJsonServlet to allow
+    javascript in browser dashboard (Damien Hardy via bobby)
+
+    HADOOP-8931. Add Java version to startup message. (eli)
+
+    HADOOP-8925. Remove the packaging. (eli)
+
   OPTIMIZATIONS
 
     HADOOP-8866. SampleQuantiles#query is O(N^2) instead of O(N). (Andrew Wang
     via atm)
 
+    HADOOP-8926. hadoop.util.PureJavaCrc32 cache hit-ratio is low for static
+    data (Gopal V via bobby)
+
   BUG FIXES
 
     HADOOP-8795. BASH tab completion doesn't look in PATH, assumes path to
@@ -338,6 +368,20 @@ Release 2.0.3-alpha - Unreleased 
     webhdfs filesystem and fsck to fail when security is on.
     (Arpit Gupta via suresh)
 
+    HADOOP-8901. GZip and Snappy support may not work without unversioned
+    libraries (Colin Patrick McCabe via todd)
+
+    HADOOP-8883. Anonymous fallback in KerberosAuthenticator is broken.
+    (rkanter via tucu)
+
+    HADOOP-8900. BuiltInGzipDecompressor throws IOException - stored gzip size
+    doesn't match decompressed size. (Slavik Krassovsky via suresh)
+
+    HADOOP-8948. TestFileUtil.testGetDU fails on Windows due to incorrect
+    assumption of line separator. (Chris Nauroth via suresh)
+
+    HADOOP-8951. RunJar to fail with user-comprehensible error 
+    message if jar missing. (stevel via suresh)
 
 Release 2.0.2-alpha - 2012-09-07 
 
@@ -1037,10 +1081,19 @@ Release 0.23.5 - UNRELEASED
 
   IMPROVEMENTS
 
+    HADOOP-8932. JNI-based user-group mapping modules can be too chatty on 
+    lookup failures. (Kihwal Lee via suresh)
+
+    HADOOP-8930. Cumulative code coverage calculation (Andrey Klochkov via
+    bobby)
+
   OPTIMIZATIONS
 
   BUG FIXES
 
+    HADOOP-8906. paths with multiple globs are unreliable. (Daryn Sharp via
+    jlowe)
+
 Release 0.23.4 - UNRELEASED
 
   INCOMPATIBLE CHANGES

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/CHANGES.txt
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt:r1397381-1401062

Modified: hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/CMakeLists.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/CMakeLists.txt?rev=1401071&r1=1401070&r2=1401071&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/CMakeLists.txt (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/CMakeLists.txt Mon Oct 22 20:43:16 2012
@@ -50,12 +50,41 @@ function(dual_output_directory TGT DIR)
     output_directory(${TGT}_static "${DIR}")
 endfunction(dual_output_directory TGT DIR)
 
+#
+# This macro alters the behavior of find_package and find_library.
+# It does this by setting the CMAKE_FIND_LIBRARY_SUFFIXES global variable. 
+# You should save that variable before calling this function and restore it
+# after you have accomplished your goal.
+#
+# The behavior is altered in two ways:
+# 1. We always find shared libraries, never static;
+# 2. We find shared libraries with the given version number.
+#
+# On Windows this function is a no-op.  Windows does not encode
+# version number information into library path names.
+#
+macro(set_find_shared_library_version LVERS)
+    IF(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
+        # Mac OS uses .dylib
+        SET(CMAKE_FIND_LIBRARY_SUFFIXES ".${LVERS}.dylib")
+    ELSEIF(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
+        # Windows doesn't support finding shared libraries by version.
+    ELSE()
+        # Most UNIX variants use .so
+        SET(CMAKE_FIND_LIBRARY_SUFFIXES ".so.${LVERS}")
+    ENDIF()
+endmacro(set_find_shared_library_version LVERS)
+
 if (NOT GENERATED_JAVAH)
     # Must identify where the generated headers have been placed
     MESSAGE(FATAL_ERROR "You must set the cmake variable GENERATED_JAVAH")
 endif (NOT GENERATED_JAVAH)
 find_package(JNI REQUIRED)
+
+SET(STORED_CMAKE_FIND_LIBRARY_SUFFIXES CMAKE_FIND_LIBRARY_SUFFIXES)
+set_find_shared_library_version("1")
 find_package(ZLIB REQUIRED)
+SET(CMAKE_FIND_LIBRARY_SUFFIXES STORED_CMAKE_FIND_LIBRARY_SUFFIXES)
 
 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -Wall -O2")
 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_REENTRANT -D_FILE_OFFSET_BITS=64")
@@ -69,10 +98,13 @@ INCLUDE(CheckCSourceCompiles)
 CHECK_FUNCTION_EXISTS(sync_file_range HAVE_SYNC_FILE_RANGE)
 CHECK_FUNCTION_EXISTS(posix_fadvise HAVE_POSIX_FADVISE)
 
+SET(STORED_CMAKE_FIND_LIBRARY_SUFFIXES CMAKE_FIND_LIBRARY_SUFFIXES)
+set_find_shared_library_version("1")
 find_library(SNAPPY_LIBRARY 
     NAMES snappy
     PATHS ${CUSTOM_SNAPPY_PREFIX} ${CUSTOM_SNAPPY_PREFIX}/lib
           ${CUSTOM_SNAPPY_PREFIX}/lib64 ${CUSTOM_SNAPPY_LIB})
+SET(CMAKE_FIND_LIBRARY_SUFFIXES STORED_CMAKE_FIND_LIBRARY_SUFFIXES)
 find_path(SNAPPY_INCLUDE_DIR 
     NAMES snappy.h
     PATHS ${CUSTOM_SNAPPY_PREFIX} ${CUSTOM_SNAPPY_PREFIX}/include

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/docs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/docs:r1397381-1401062

Modified: hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html?rev=1401071&r1=1401070&r2=1401071&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html Mon Oct 22 20:43:16 2012
@@ -15,8 +15,8 @@ These release notes include new develope
 <li> <a href="https://issues.apache.org/jira/browse/YARN-137">YARN-137</a>.
      Major improvement reported by Siddharth Seth and fixed by Siddharth Seth (scheduler)<br>
      <b>Change the default scheduler to the CapacityScheduler</b><br>
-     <blockquote>There's some bugs in the FifoScheduler atm - doesn't distribute tasks across nodes and some headroom (available resource) issues.
-That's not the best experience for users trying out the 2.0 branch. The CS with the default configuration of a single queue behaves the same as the FifoScheduler and doesn't have these issues.
+     <blockquote>There's some bugs in the FifoScheduler atm - doesn't distribute tasks across nodes and some headroom (available resource) issues.
+That's not the best experience for users trying out the 2.0 branch. The CS with the default configuration of a single queue behaves the same as the FifoScheduler and doesn't have these issues.
 </blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/YARN-108">YARN-108</a>.
      Critical bug reported by Jason Lowe and fixed by Jason Lowe (nodemanager)<br>
@@ -45,73 +45,73 @@ That's not the best experience for users
 <li> <a href="https://issues.apache.org/jira/browse/YARN-79">YARN-79</a>.
      Major bug reported by Bikas Saha and fixed by Vinod Kumar Vavilapalli (client)<br>
      <b>Calling YarnClientImpl.close throws Exception</b><br>
-     <blockquote>The following exception is thrown
-===========
-*org.apache.hadoop.HadoopIllegalArgumentException: Cannot close proxy - is not Closeable or does not provide closeable invocation handler class org.apache.hadoop.yarn.api.impl.pb.client.ClientRMProtocolPBClientImpl*
-	*at org.apache.hadoop.ipc.RPC.stopProxy(RPC.java:624)*
-	*at org.hadoop.yarn.client.YarnClientImpl.stop(YarnClientImpl.java:102)*
-	at org.apache.hadoop.yarn.applications.unmanagedamlauncher.UnmanagedAMLauncher.run(UnmanagedAMLauncher.java:336)
-	at org.apache.hadoop.yarn.applications.unmanagedamlauncher.TestUnmanagedAMLauncher.testDSShell(TestUnmanagedAMLauncher.java:156)
-	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
-	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
-	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
-	at java.lang.reflect.Method.invoke(Method.java:597)
-	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
-	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
-	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
-	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:20)
-	at org.junit.runners.BlockJUnit4ClassRunner.runNotIgnored(BlockJUnit4ClassRunner.java:79)
-	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:71)
-	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:49)
-	at org.junit.runners.ParentRunner$3.run(ParentRunner.java:193)
-	at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:52)
-	at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:191)
-	at org.junit.runners.ParentRunner.access$000(ParentRunner.java:42)
-	at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:184)
-	at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:28)
-	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:31)
-	at org.junit.runners.ParentRunner.run(ParentRunner.java:236)
-	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:236)
-	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:134)
-	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:113)
-	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
-	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
-	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
-	at java.lang.reflect.Method.invoke(Method.java:597)
-	at org.apache.maven.surefire.util.ReflectionUtils.invokeMethodWithArray(ReflectionUtils.java:189)
-	at org.apache.maven.surefire.booter.ProviderFactory$ProviderProxy.invoke(ProviderFactory.java:165)
-	at org.apache.maven.surefire.booter.ProviderFactory.invokeProvider(ProviderFactory.java:85)
-	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:103)
-	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:74)
+     <blockquote>The following exception is thrown
+===========
+*org.apache.hadoop.HadoopIllegalArgumentException: Cannot close proxy - is not Closeable or does not provide closeable invocation handler class org.apache.hadoop.yarn.api.impl.pb.client.ClientRMProtocolPBClientImpl*
+	*at org.apache.hadoop.ipc.RPC.stopProxy(RPC.java:624)*
+	*at org.hadoop.yarn.client.YarnClientImpl.stop(YarnClientImpl.java:102)*
+	at org.apache.hadoop.yarn.applications.unmanagedamlauncher.UnmanagedAMLauncher.run(UnmanagedAMLauncher.java:336)
+	at org.apache.hadoop.yarn.applications.unmanagedamlauncher.TestUnmanagedAMLauncher.testDSShell(TestUnmanagedAMLauncher.java:156)
+	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
+	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
+	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
+	at java.lang.reflect.Method.invoke(Method.java:597)
+	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
+	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
+	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
+	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:20)
+	at org.junit.runners.BlockJUnit4ClassRunner.runNotIgnored(BlockJUnit4ClassRunner.java:79)
+	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:71)
+	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:49)
+	at org.junit.runners.ParentRunner$3.run(ParentRunner.java:193)
+	at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:52)
+	at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:191)
+	at org.junit.runners.ParentRunner.access$000(ParentRunner.java:42)
+	at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:184)
+	at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:28)
+	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:31)
+	at org.junit.runners.ParentRunner.run(ParentRunner.java:236)
+	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:236)
+	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:134)
+	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:113)
+	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
+	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
+	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
+	at java.lang.reflect.Method.invoke(Method.java:597)
+	at org.apache.maven.surefire.util.ReflectionUtils.invokeMethodWithArray(ReflectionUtils.java:189)
+	at org.apache.maven.surefire.booter.ProviderFactory$ProviderProxy.invoke(ProviderFactory.java:165)
+	at org.apache.maven.surefire.booter.ProviderFactory.invokeProvider(ProviderFactory.java:85)
+	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:103)
+	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:74)
 ===========</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/YARN-75">YARN-75</a>.
      Major bug reported by Siddharth Seth and fixed by Siddharth Seth <br>
      <b>RMContainer should handle a RELEASE event while RUNNING</b><br>
-     <blockquote>An AppMaster can send a container release at any point. Currently this results in an exception, if this is done while the RM considers the container to be RUNNING.
+     <blockquote>An AppMaster can send a container release at any point. Currently this results in an exception, if this is done while the RM considers the container to be RUNNING.
 The event not being processed correctly also implies that these containers do not show up in the Completed Container List seen by the AM (AMRMProtocol). MR-3902 depends on this set being complete. </blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/YARN-68">YARN-68</a>.
      Major bug reported by patrick white and fixed by Daryn Sharp (nodemanager)<br>
      <b>NodeManager will refuse to shutdown indefinitely due to container log aggregation</b><br>
-     <blockquote>The nodemanager is able to get into a state where containermanager.logaggregation.AppLogAggregatorImpl will apparently wait
-indefinitely for log aggregation to complete for an application, even if that application has abnormally terminated and is no longer present. 
-
-Observed behavior is that an attempt to stop the nodemanager daemon will return but have no effect, the nm log continually displays messages similar to this:
-
-[Thread-1]2012-08-21 17:44:07,581 INFO
-org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.AppLogAggregatorImpl:
-Waiting for aggregation to complete for application_1345221477405_2733
-
-The only recovery we found to work was to 'kill -9' the nm process.
-
-What exactly causes the NM to enter this state is unclear but we do see this behavior reliably when the NM has run a task which failed, for example when debugging oozie distcp actions and having a distcp map task fail, the NM that was running the container will now enter this state where a shutdown on said NM will never complete, 'never' in this case was waiting for 2 hours before killing the nodemanager process.
+     <blockquote>The nodemanager is able to get into a state where containermanager.logaggregation.AppLogAggregatorImpl will apparently wait
+indefinitely for log aggregation to complete for an application, even if that application has abnormally terminated and is no longer present. 
+
+Observed behavior is that an attempt to stop the nodemanager daemon will return but have no effect, the nm log continually displays messages similar to this:
+
+[Thread-1]2012-08-21 17:44:07,581 INFO
+org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.AppLogAggregatorImpl:
+Waiting for aggregation to complete for application_1345221477405_2733
+
+The only recovery we found to work was to 'kill -9' the nm process.
+
+What exactly causes the NM to enter this state is unclear but we do see this behavior reliably when the NM has run a task which failed, for example when debugging oozie distcp actions and having a distcp map task fail, the NM that was running the container will now enter this state where a shutdown on said NM will never complete, 'never' in this case was waiting for 2 hours before killing the nodemanager process.
 </blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/YARN-66">YARN-66</a>.
      Critical bug reported by Thomas Graves and fixed by Thomas Graves (nodemanager)<br>
      <b>aggregated logs permissions not set properly</b><br>
-     <blockquote>If the default file permissions are set to something restrictive - like 700, application logs get aggregated and created with those restrictive file permissions which doesn't allow the history server to serve them up.
-
-
-They need to be created with group readable similar to how log aggregation sets up the directory permissions.
+     <blockquote>If the default file permissions are set to something restrictive - like 700, application logs get aggregated and created with those restrictive file permissions which doesn't allow the history server to serve them up.
+
+
+They need to be created with group readable similar to how log aggregation sets up the directory permissions.
 </blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/YARN-63">YARN-63</a>.
      Major bug reported by Jason Lowe and fixed by Jason Lowe (resourcemanager)<br>
@@ -128,47 +128,47 @@ They need to be created with group reada
 <li> <a href="https://issues.apache.org/jira/browse/YARN-42">YARN-42</a>.
      Major bug reported by Devaraj K and fixed by Devaraj K (nodemanager)<br>
      <b>Node Manager throws NPE on startup</b><br>
-     <blockquote>NM throws NPE on startup if it doesn't have persmission's on nm local dir's
-
-
-{code:xml}
-2012-05-14 16:32:13,468 FATAL org.apache.hadoop.yarn.server.nodemanager.NodeManager: Error starting NodeManager
-org.apache.hadoop.yarn.YarnException: Failed to initialize LocalizationService
-	at org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService.init(ResourceLocalizationService.java:202)
-	at org.apache.hadoop.yarn.service.CompositeService.init(CompositeService.java:58)
-	at org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl.init(ContainerManagerImpl.java:183)
-	at org.apache.hadoop.yarn.service.CompositeService.init(CompositeService.java:58)
-	at org.apache.hadoop.yarn.server.nodemanager.NodeManager.init(NodeManager.java:166)
-	at org.apache.hadoop.yarn.server.nodemanager.NodeManager.initAndStartNodeManager(NodeManager.java:268)
-	at org.apache.hadoop.yarn.server.nodemanager.NodeManager.main(NodeManager.java:284)
-Caused by: java.io.IOException: mkdir of /mrv2/tmp/nm-local-dir/usercache failed
-	at org.apache.hadoop.fs.FileSystem.primitiveMkdir(FileSystem.java:907)
-	at org.apache.hadoop.fs.DelegateToFileSystem.mkdir(DelegateToFileSystem.java:143)
-	at org.apache.hadoop.fs.FilterFs.mkdir(FilterFs.java:189)
-	at org.apache.hadoop.fs.FileContext$4.next(FileContext.java:706)
-	at org.apache.hadoop.fs.FileContext$4.next(FileContext.java:703)
-	at org.apache.hadoop.fs.FileContext$FSLinkResolver.resolve(FileContext.java:2325)
-	at org.apache.hadoop.fs.FileContext.mkdir(FileContext.java:703)
-	at org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService.init(ResourceLocalizationService.java:188)
-	... 6 more
-2012-05-14 16:32:13,472 INFO org.apache.hadoop.yarn.service.CompositeService: Error stopping org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.NonAggregatingLogHandler
-java.lang.NullPointerException
-	at org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.NonAggregatingLogHandler.stop(NonAggregatingLogHandler.java:82)
-	at org.apache.hadoop.yarn.service.CompositeService.stop(CompositeService.java:99)
-	at org.apache.hadoop.yarn.service.CompositeService.stop(CompositeService.java:89)
-	at org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl.stop(ContainerManagerImpl.java:266)
-	at org.apache.hadoop.yarn.service.CompositeService.stop(CompositeService.java:99)
-	at org.apache.hadoop.yarn.service.CompositeService.stop(CompositeService.java:89)
-	at org.apache.hadoop.yarn.server.nodemanager.NodeManager.stop(NodeManager.java:182)
-	at org.apache.hadoop.yarn.service.CompositeService$CompositeServiceShutdownHook.run(CompositeService.java:122)
-	at org.apache.hadoop.util.ShutdownHookManager$1.run(ShutdownHookManager.java:54)
-{code}
+     <blockquote>NM throws NPE on startup if it doesn't have persmission's on nm local dir's
+
+
+{code:xml}
+2012-05-14 16:32:13,468 FATAL org.apache.hadoop.yarn.server.nodemanager.NodeManager: Error starting NodeManager
+org.apache.hadoop.yarn.YarnException: Failed to initialize LocalizationService
+	at org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService.init(ResourceLocalizationService.java:202)
+	at org.apache.hadoop.yarn.service.CompositeService.init(CompositeService.java:58)
+	at org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl.init(ContainerManagerImpl.java:183)
+	at org.apache.hadoop.yarn.service.CompositeService.init(CompositeService.java:58)
+	at org.apache.hadoop.yarn.server.nodemanager.NodeManager.init(NodeManager.java:166)
+	at org.apache.hadoop.yarn.server.nodemanager.NodeManager.initAndStartNodeManager(NodeManager.java:268)
+	at org.apache.hadoop.yarn.server.nodemanager.NodeManager.main(NodeManager.java:284)
+Caused by: java.io.IOException: mkdir of /mrv2/tmp/nm-local-dir/usercache failed
+	at org.apache.hadoop.fs.FileSystem.primitiveMkdir(FileSystem.java:907)
+	at org.apache.hadoop.fs.DelegateToFileSystem.mkdir(DelegateToFileSystem.java:143)
+	at org.apache.hadoop.fs.FilterFs.mkdir(FilterFs.java:189)
+	at org.apache.hadoop.fs.FileContext$4.next(FileContext.java:706)
+	at org.apache.hadoop.fs.FileContext$4.next(FileContext.java:703)
+	at org.apache.hadoop.fs.FileContext$FSLinkResolver.resolve(FileContext.java:2325)
+	at org.apache.hadoop.fs.FileContext.mkdir(FileContext.java:703)
+	at org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService.init(ResourceLocalizationService.java:188)
+	... 6 more
+2012-05-14 16:32:13,472 INFO org.apache.hadoop.yarn.service.CompositeService: Error stopping org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.NonAggregatingLogHandler
+java.lang.NullPointerException
+	at org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.NonAggregatingLogHandler.stop(NonAggregatingLogHandler.java:82)
+	at org.apache.hadoop.yarn.service.CompositeService.stop(CompositeService.java:99)
+	at org.apache.hadoop.yarn.service.CompositeService.stop(CompositeService.java:89)
+	at org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl.stop(ContainerManagerImpl.java:266)
+	at org.apache.hadoop.yarn.service.CompositeService.stop(CompositeService.java:99)
+	at org.apache.hadoop.yarn.service.CompositeService.stop(CompositeService.java:89)
+	at org.apache.hadoop.yarn.server.nodemanager.NodeManager.stop(NodeManager.java:182)
+	at org.apache.hadoop.yarn.service.CompositeService$CompositeServiceShutdownHook.run(CompositeService.java:122)
+	at org.apache.hadoop.util.ShutdownHookManager$1.run(ShutdownHookManager.java:54)
+{code}
 </blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/YARN-39">YARN-39</a>.
      Critical sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli <br>
      <b>RM-NM secret-keys should be randomly generated and rolled every so often</b><br>
-     <blockquote> - RM should generate the master-key randomly
- - The master-key should roll every so often
+     <blockquote> - RM should generate the master-key randomly
+ - The master-key should roll every so often
  - NM should remember old expired keys so that already doled out container-requests can be satisfied.</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/YARN-37">YARN-37</a>.
      Minor bug reported by Jason Lowe and fixed by Mayank Bansal (resourcemanager)<br>
@@ -177,42 +177,42 @@ java.lang.NullPointerException
 <li> <a href="https://issues.apache.org/jira/browse/YARN-36">YARN-36</a>.
      Blocker bug reported by Eli Collins and fixed by Radim Kolar <br>
      <b>branch-2.1.0-alpha doesn't build</b><br>
-     <blockquote>branch-2.1.0-alpha doesn't build due to the following. Per YARN-1 I updated the mvn version to be 2.1.0-SNAPSHOT, before I hit this issue it didn't compile due to the bogus version. 
-
-{noformat}
-hadoop-branch-2.1.0-alpha $ mvn compile
-[INFO] Scanning for projects...
-[ERROR] The build could not read 1 project -&gt; [Help 1]
-[ERROR]   
-[ERROR]   The project org.apache.hadoop:hadoop-yarn-project:2.1.0-SNAPSHOT (/home/eli/src/hadoop-branch-2.1.0-alpha/hadoop-yarn-project/pom.xml) has 1 error
-[ERROR]     'dependencies.dependency.version' for org.hsqldb:hsqldb:jar is missing. @ line 160, column 17
+     <blockquote>branch-2.1.0-alpha doesn't build due to the following. Per YARN-1 I updated the mvn version to be 2.1.0-SNAPSHOT, before I hit this issue it didn't compile due to the bogus version. 
+
+{noformat}
+hadoop-branch-2.1.0-alpha $ mvn compile
+[INFO] Scanning for projects...
+[ERROR] The build could not read 1 project -&gt; [Help 1]
+[ERROR]   
+[ERROR]   The project org.apache.hadoop:hadoop-yarn-project:2.1.0-SNAPSHOT (/home/eli/src/hadoop-branch-2.1.0-alpha/hadoop-yarn-project/pom.xml) has 1 error
+[ERROR]     'dependencies.dependency.version' for org.hsqldb:hsqldb:jar is missing. @ line 160, column 17
 {noformat}</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/YARN-31">YARN-31</a>.
      Major bug reported by Thomas Graves and fixed by Thomas Graves <br>
      <b>TestDelegationTokenRenewer fails on jdk7</b><br>
-     <blockquote>TestDelegationTokenRenewer fails when run with jdk7.  
-
+     <blockquote>TestDelegationTokenRenewer fails when run with jdk7.  
+
 With JDK7, test methods run in an undefined order. Here it is expecting that testDTRenewal runs first but it no longer is.</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/YARN-29">YARN-29</a>.
      Major bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (client)<br>
      <b>Add a yarn-client module</b><br>
-     <blockquote>I see that we are duplicating (some) code for talking to RM via client API. In this light, a yarn-client module will be useful so that clients of all frameworks can use/extend it.
-
+     <blockquote>I see that we are duplicating (some) code for talking to RM via client API. In this light, a yarn-client module will be useful so that clients of all frameworks can use/extend it.
+
 And that same module can be the destination for all the YARN's command line tools.</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/YARN-27">YARN-27</a>.
      Major bug reported by Ramya Sunil and fixed by Arun C Murthy <br>
      <b>Failed refreshQueues due to misconfiguration prevents further refreshing of queues</b><br>
-     <blockquote>Stumbled upon this problem while refreshing queues with incorrect configuration. The exact scenario was:
-1. Added a new queue "newQueue" without defining its capacity.
-2. "bin/mapred queue -refreshQueues" fails correctly with "Illegal capacity of -1 for queue root.newQueue"
-3. However, after defining the capacity of "newQueue" followed by a second "bin/mapred queue -refreshQueues" throws "org.apache.hadoop.metrics2.MetricsException: Metrics source QueueMetrics,q0=root,q1=newQueue already exists!" Also see Hadoop:name=QueueMetrics,q0=root,q1=newQueue,service=ResourceManager metrics being available even though the queue was not added.
-
+     <blockquote>Stumbled upon this problem while refreshing queues with incorrect configuration. The exact scenario was:
+1. Added a new queue "newQueue" without defining its capacity.
+2. "bin/mapred queue -refreshQueues" fails correctly with "Illegal capacity of -1 for queue root.newQueue"
+3. However, after defining the capacity of "newQueue" followed by a second "bin/mapred queue -refreshQueues" throws "org.apache.hadoop.metrics2.MetricsException: Metrics source QueueMetrics,q0=root,q1=newQueue already exists!" Also see Hadoop:name=QueueMetrics,q0=root,q1=newQueue,service=ResourceManager metrics being available even though the queue was not added.
+
 The expected behavior would be to refresh the queues correctly and allow addition of "newQueue". </blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/YARN-25">YARN-25</a>.
      Major bug reported by Thomas Graves and fixed by Robert Joseph Evans <br>
      <b>remove old aggregated logs</b><br>
-     <blockquote>Currently the aggregated user logs under NM_REMOTE_APP_LOG_DIR are never removed.  We should have mechanism to remove them after certain period.
-
+     <blockquote>Currently the aggregated user logs under NM_REMOTE_APP_LOG_DIR are never removed.  We should have mechanism to remove them after certain period.
+
 It might make sense for job history server to remove them.</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/YARN-22">YARN-22</a>.
      Minor bug reported by Eli Collins and fixed by Mayank Bansal <br>
@@ -221,29 +221,29 @@ It might make sense for job history serv
 <li> <a href="https://issues.apache.org/jira/browse/YARN-15">YARN-15</a>.
      Critical bug reported by Alejandro Abdelnur and fixed by Arun C Murthy (nodemanager)<br>
      <b>YarnConfiguration DEFAULT_YARN_APPLICATION_CLASSPATH should be updated</b><br>
-     <blockquote>
-{code}
-  /**
-   * Default CLASSPATH for YARN applications. A comma-separated list of
-   * CLASSPATH entries
-   */
-  public static final String[] DEFAULT_YARN_APPLICATION_CLASSPATH = {
-      "$HADOOP_CONF_DIR", "$HADOOP_COMMON_HOME/share/hadoop/common/*",
-      "$HADOOP_COMMON_HOME/share/hadoop/common/lib/*",
-      "$HADOOP_HDFS_HOME/share/hadoop/hdfs/*",
-      "$HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/*",
-      "$YARN_HOME/share/hadoop/mapreduce/*",
-      "$YARN_HOME/share/hadoop/mapreduce/lib/*"};
-{code}
-
+     <blockquote>
+{code}
+  /**
+   * Default CLASSPATH for YARN applications. A comma-separated list of
+   * CLASSPATH entries
+   */
+  public static final String[] DEFAULT_YARN_APPLICATION_CLASSPATH = {
+      "$HADOOP_CONF_DIR", "$HADOOP_COMMON_HOME/share/hadoop/common/*",
+      "$HADOOP_COMMON_HOME/share/hadoop/common/lib/*",
+      "$HADOOP_HDFS_HOME/share/hadoop/hdfs/*",
+      "$HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/*",
+      "$YARN_HOME/share/hadoop/mapreduce/*",
+      "$YARN_HOME/share/hadoop/mapreduce/lib/*"};
+{code}
+
 It should have {{share/yarn/}} and MR should add the {{share/mapreduce/}} (another JIRA?)</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/YARN-14">YARN-14</a>.
      Major bug reported by Jason Lowe and fixed by Jason Lowe (nodemanager)<br>
      <b>Symlinks to peer distributed cache files no longer work</b><br>
-     <blockquote>Trying to create a symlink to another file that is specified for the distributed cache will fail to create the link.  For example:
-
-hadoop jar ... -files "x,y,x#z"
-
+     <blockquote>Trying to create a symlink to another file that is specified for the distributed cache will fail to create the link.  For example:
+
+hadoop jar ... -files "x,y,x#z"
+
 will localize the files x and y as x and y, but the z symlink for x will not be created.  This is a regression from 1.x behavior.</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/YARN-13">YARN-13</a>.
      Critical bug reported by Todd Lipcon and fixed by  <br>
@@ -252,13 +252,13 @@ will localize the files x and y as x and
 <li> <a href="https://issues.apache.org/jira/browse/YARN-12">YARN-12</a>.
      Major bug reported by Junping Du and fixed by Junping Du (scheduler)<br>
      <b>Several Findbugs issues with new FairScheduler in YARN</b><br>
-     <blockquote>The good feature of FairScheduler is added recently to YARN. As recently PreCommit test from MAPREDUCE-4309, there are several bugs found by Findbugs related to FairScheduler:
-org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerEventLog.shutdown() might ignore java.lang.Exception
-Inconsistent synchronization of org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerEventLog.logDisabled; locked 50% of time
-Inconsistent synchronization of org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueueManager.queueMaxAppsDefault; locked 50% of time
-Inconsistent synchronization of org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueueManager.userMaxAppsDefault; locked 50% of time
-The details are in:https://builds.apache.org/job/PreCommit-MAPREDUCE-Build/2612//artifact/trunk/patchprocess/newPatchFindbugsWarningshadoop-yarn-server-resourcemanager.html#DE_MIGHT_IGNORE 
-
+     <blockquote>The good feature of FairScheduler is added recently to YARN. As recently PreCommit test from MAPREDUCE-4309, there are several bugs found by Findbugs related to FairScheduler:
+org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerEventLog.shutdown() might ignore java.lang.Exception
+Inconsistent synchronization of org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerEventLog.logDisabled; locked 50% of time
+Inconsistent synchronization of org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueueManager.queueMaxAppsDefault; locked 50% of time
+Inconsistent synchronization of org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueueManager.userMaxAppsDefault; locked 50% of time
+The details are in:https://builds.apache.org/job/PreCommit-MAPREDUCE-Build/2612//artifact/trunk/patchprocess/newPatchFindbugsWarningshadoop-yarn-server-resourcemanager.html#DE_MIGHT_IGNORE 
+
 </blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/YARN-10">YARN-10</a>.
      Major improvement reported by Arun C Murthy and fixed by Hitesh Shah <br>
@@ -991,18 +991,18 @@ The details are in:https://builds.apache
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3812">MAPREDUCE-3812</a>.
      Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Harsh J (mrv2 , performance)<br>
      <b>Lower default allocation sizes, fix allocation configurations and document them</b><br>
-     <blockquote>Removes two sets of previously available config properties:
-
-1. ( yarn.scheduler.fifo.minimum-allocation-mb and yarn.scheduler.fifo.maximum-allocation-mb ) and,
-2. ( yarn.scheduler.capacity.minimum-allocation-mb and yarn.scheduler.capacity.maximum-allocation-mb )
-
-In favor of two new, generically named properties:
-
-1. yarn.scheduler.minimum-allocation-mb - This acts as the floor value of memory resource requests for containers.
-2. yarn.scheduler.maximum-allocation-mb - This acts as the ceiling value of memory resource requests for containers.
-
-Both these properties need to be set at the ResourceManager (RM) to take effect, as the RM is where the scheduler resides.
-
+     <blockquote>Removes two sets of previously available config properties:
+
+1. ( yarn.scheduler.fifo.minimum-allocation-mb and yarn.scheduler.fifo.maximum-allocation-mb ) and,
+2. ( yarn.scheduler.capacity.minimum-allocation-mb and yarn.scheduler.capacity.maximum-allocation-mb )
+
+In favor of two new, generically named properties:
+
+1. yarn.scheduler.minimum-allocation-mb - This acts as the floor value of memory resource requests for containers.
+2. yarn.scheduler.maximum-allocation-mb - This acts as the ceiling value of memory resource requests for containers.
+
+Both these properties need to be set at the ResourceManager (RM) to take effect, as the RM is where the scheduler resides.
+
 Also changes the default minimum and maximums to 128 MB and 10 GB respectively.</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3782">MAPREDUCE-3782</a>.
      Critical bug reported by Arpit Gupta and fixed by Jason Lowe (mrv2)<br>
@@ -1043,8 +1043,8 @@ Also changes the default minimum and max
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3543">MAPREDUCE-3543</a>.
      Critical bug reported by Mahadev konar and fixed by Thomas Graves (mrv2)<br>
      <b>Mavenize Gridmix.</b><br>
-     <blockquote>Note that to apply this you should first run the script - ./MAPREDUCE-3543v3.sh svn, then apply the patch.
-
+     <blockquote>Note that to apply this you should first run the script - ./MAPREDUCE-3543v3.sh svn, then apply the patch.
+
 If this is merged to more then trunk, the version inside of hadoop-tools/hadoop-gridmix/pom.xml will need to be udpated accordingly.</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3506">MAPREDUCE-3506</a>.
      Minor bug reported by Ratandeep Ratti and fixed by Jason Lowe (client , mrv2)<br>
@@ -1613,10 +1613,10 @@ If this is merged to more then trunk, th
 <li> <a href="https://issues.apache.org/jira/browse/HDFS-3475">HDFS-3475</a>.
      Trivial improvement reported by Harsh J and fixed by Harsh J <br>
      <b>Make the replication and invalidation rates configurable</b><br>
-     <blockquote>This change adds two new configuration parameters. 
-# {{dfs.namenode.invalidate.work.pct.per.iteration}} for controlling deletion rate of blocks. 
-# {{dfs.namenode.replication.work.multiplier.per.iteration}} for controlling replication rate. This in turn allows controlling the time it takes for decommissioning. 
-
+     <blockquote>This change adds two new configuration parameters. 
+# {{dfs.namenode.invalidate.work.pct.per.iteration}} for controlling deletion rate of blocks. 
+# {{dfs.namenode.replication.work.multiplier.per.iteration}} for controlling replication rate. This in turn allows controlling the time it takes for decommissioning. 
+
 Please see hdfs-default.xml for detailed description.</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/HDFS-3474">HDFS-3474</a>.
      Major sub-task reported by Ivan Kelly and fixed by Ivan Kelly <br>
@@ -4769,8 +4769,8 @@ These release notes include new develope
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3720">MAPREDUCE-3720</a>.
      Major bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (client , mrv2)<br>
      <b>Command line listJobs should not visit each AM</b><br>
-     <blockquote>Changed bin/mapred job -list to not print job-specific information not available at RM.
-
+     <blockquote>Changed bin/mapred job -list to not print job-specific information not available at RM.
+
 Very minor incompatibility in cmd-line output, inevitable due to MRv2 architecture.</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3718">MAPREDUCE-3718</a>.
      Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Hitesh Shah (mrv2 , performance)<br>
@@ -4819,8 +4819,8 @@ Very minor incompatibility in cmd-line o
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3703">MAPREDUCE-3703</a>.
      Critical bug reported by Eric Payne and fixed by Eric Payne (mrv2 , resourcemanager)<br>
      <b>ResourceManager should provide node lists in JMX output</b><br>
-     <blockquote>New JMX Bean in ResourceManager to provide list of live node managers:
-
+     <blockquote>New JMX Bean in ResourceManager to provide list of live node managers:
+
 Hadoop:service=ResourceManager,name=RMNMInfo LiveNodeManagers</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3702">MAPREDUCE-3702</a>.
      Critical bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
@@ -5037,12 +5037,12 @@ Hadoop:service=ResourceManager,name=RMNM
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3549">MAPREDUCE-3549</a>.
      Blocker bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
      <b>write api documentation for web service apis for RM, NM, mapreduce app master, and job history server</b><br>
-     <blockquote>new files added: A      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/WebServicesIntro.apt.vm
-A      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/NodeManagerRest.apt.vm
-A      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm
-A      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/MapredAppMasterRest.apt.vm
-A      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HistoryServerRest.apt.vm
-
+     <blockquote>new files added: A      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/WebServicesIntro.apt.vm
+A      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/NodeManagerRest.apt.vm
+A      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm
+A      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/MapredAppMasterRest.apt.vm
+A      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HistoryServerRest.apt.vm
+
 The hadoop-project/src/site/site.xml is split into separate patch.</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3548">MAPREDUCE-3548</a>.
      Critical sub-task reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
@@ -5471,7 +5471,7 @@ The hadoop-project/src/site/site.xml is 
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3297">MAPREDUCE-3297</a>.
      Major task reported by Siddharth Seth and fixed by Siddharth Seth (mrv2)<br>
      <b>Move Log Related components from yarn-server-nodemanager to yarn-common</b><br>
-     <blockquote>Moved log related components into yarn-common so that HistoryServer and clients can use them without depending on the yarn-server-nodemanager module.
+     <blockquote>Moved log related components into yarn-common so that HistoryServer and clients can use them without depending on the yarn-server-nodemanager module.
 </blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3291">MAPREDUCE-3291</a>.
      Blocker bug reported by Ramya Sunil and fixed by Robert Joseph Evans (mrv2)<br>
@@ -5504,17 +5504,17 @@ The hadoop-project/src/site/site.xml is 
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3219">MAPREDUCE-3219</a>.
      Minor sub-task reported by Hitesh Shah and fixed by Hitesh Shah (mrv2 , test)<br>
      <b>ant test TestDelegationToken failing on trunk</b><br>
-     <blockquote>Reenabled and fixed bugs in the failing test TestDelegationToken.
+     <blockquote>Reenabled and fixed bugs in the failing test TestDelegationToken.
 </blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3217">MAPREDUCE-3217</a>.
      Minor sub-task reported by Hitesh Shah and fixed by Devaraj K (mrv2 , test)<br>
      <b>ant test TestAuditLogger fails on trunk</b><br>
-     <blockquote>Reenabled and fixed bugs in the failing ant test TestAuditLogger.
+     <blockquote>Reenabled and fixed bugs in the failing ant test TestAuditLogger.
 </blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3215">MAPREDUCE-3215</a>.
      Minor sub-task reported by Hitesh Shah and fixed by Hitesh Shah (mrv2)<br>
      <b>org.apache.hadoop.mapreduce.TestNoJobSetupCleanup failing on trunk</b><br>
-     <blockquote>Reneabled and fixed bugs in the failing test TestNoJobSetupCleanup.
+     <blockquote>Reenabled and fixed bugs in the failing test TestNoJobSetupCleanup.
 </blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3194">MAPREDUCE-3194</a>.
      Major bug reported by Siddharth Seth and fixed by Jason Lowe (mrv2)<br>
@@ -5875,12 +5875,12 @@ The hadoop-project/src/site/site.xml is 
 <li> <a href="https://issues.apache.org/jira/browse/HDFS-2246">HDFS-2246</a>.
      Major improvement reported by Sanjay Radia and fixed by Jitendra Nath Pandey <br>
      <b>Shortcut a local client reads to a Datanodes files directly</b><br>
-     <blockquote>1. New configurations
-a. dfs.block.local-path-access.user is the key in datanode configuration to specify the user allowed to do short circuit read.
-b. dfs.client.read.shortcircuit is the key to enable short circuit read at the client side configuration.
-c. dfs.client.read.shortcircuit.skip.checksum is the key to bypass checksum check at the client side.
-2. By default none of the above are enabled and short circuit read will not kick in.
-3. If security is on, the feature can be used only for user that has kerberos credentials at the client, therefore map reduce tasks cannot benefit from it in general.
+     <blockquote>1. New configurations
+a. dfs.block.local-path-access.user is the key in the datanode configuration to specify the user allowed to do short-circuit reads.
+b. dfs.client.read.shortcircuit is the key to enable short-circuit reads in the client-side configuration.
+c. dfs.client.read.shortcircuit.skip.checksum is the key to bypass the checksum check on the client side.
+2. By default none of the above are enabled, and short-circuit reads will not kick in.
+3. If security is on, the feature can be used only for a user that has Kerberos credentials at the client; therefore MapReduce tasks cannot benefit from it in general.
 </blockquote></li>
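
A minimal sketch of wiring up the three keys above, assuming hdfs-site.xml
edits on the datanode (key a) and the client (keys b and c); the user name
is a placeholder:

  <!-- datanode side: user permitted to read block files directly -->
  <property>
    <name>dfs.block.local-path-access.user</name>
    <value>hbase</value> <!-- placeholder user -->
  </property>

  <!-- client side -->
  <property>
    <name>dfs.client.read.shortcircuit</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.client.read.shortcircuit.skip.checksum</name>
    <value>false</value> <!-- set true only to bypass checksum verification -->
  </property>
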
 <li> <a href="https://issues.apache.org/jira/browse/HDFS-2178">HDFS-2178</a>.
      Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur <br>
@@ -6161,7 +6161,7 @@ c. dfs.client.read.shortcircuit.skip.che
 <li> <a href="https://issues.apache.org/jira/browse/HADOOP-7802">HADOOP-7802</a>.
      Major bug reported by Bruno Mah&#233; and fixed by Bruno Mah&#233; <br>
      <b>Hadoop scripts unconditionally source "$bin"/../libexec/hadoop-config.sh.</b><br>
-     <blockquote>Here is a patch to enable this behavior
+     <blockquote>Here is a patch to enable this behavior
 </blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/HADOOP-7801">HADOOP-7801</a>.
      Major bug reported by Bruno Mah&#233; and fixed by Bruno Mah&#233; (build)<br>
@@ -6486,9 +6486,9 @@ These release notes include new develope
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3186">MAPREDUCE-3186</a>.
      Blocker bug reported by Ramgopal N and fixed by Eric Payne (mrv2)<br>
      <b>User jobs are getting hanged if the Resource manager process goes down and comes up while job is getting executed.</b><br>
-     <blockquote>New Yarn configuration property:
-
-Name: yarn.app.mapreduce.am.scheduler.connection.retries
+     <blockquote>New Yarn configuration property:
+
+Name: yarn.app.mapreduce.am.scheduler.connection.retries
 Description: Number of times AM should retry to contact RM if connection is lost.</blockquote></li>
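
For example, the property could be set in the job or cluster configuration
as follows (3 is an arbitrary illustrative count, not the documented default):

  <property>
    <name>yarn.app.mapreduce.am.scheduler.connection.retries</name>
    <!-- number of times the AM retries contacting the RM after a lost
         connection; placeholder value -->
    <value>3</value>
  </property>
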
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3185">MAPREDUCE-3185</a>.
      Critical bug reported by Mahadev konar and fixed by Jonathan Eagles (mrv2)<br>
@@ -6641,7 +6641,7 @@ Description: Number of times AM should r
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3112">MAPREDUCE-3112</a>.
      Major bug reported by Eric Yang and fixed by Eric Yang (contrib/streaming)<br>
      <b>Calling hadoop cli inside mapreduce job leads to errors</b><br>
-     <blockquote>Removed inheritance of certain server environment variables (HADOOP_OPTS and HADOOP_ROOT_LOGGER) in task attempt process.
+     <blockquote>Removed inheritance of certain server environment variables (HADOOP_OPTS and HADOOP_ROOT_LOGGER) in task attempt process.
 </blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3110">MAPREDUCE-3110</a>.
      Major bug reported by Devaraj K and fixed by Vinod Kumar Vavilapalli (mrv2 , test)<br>
@@ -7114,16 +7114,16 @@ Description: Number of times AM should r
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2858">MAPREDUCE-2858</a>.
      Blocker sub-task reported by Luke Lu and fixed by Robert Joseph Evans (applicationmaster , mrv2 , security)<br>
      <b>MRv2 WebApp Security</b><br>
-     <blockquote>A new server has been added to yarn.  It is a web proxy that sits in front of the AM web UI.  The server is controlled by the yarn.web-proxy.address config.  If that config is set, and it points to an address that is different then the RM web interface then a separate proxy server needs to be launched.
-
-This can be done by running 
-
-yarn-daemon.sh start proxyserver
-
-If a separate proxy server is needed other configs also may need to be set, if security is enabled.
-yarn.web-proxy.principal
-yarn.web-proxy.keytab
-
+     <blockquote>A new server has been added to YARN. It is a web proxy that sits in front of the AM web UI. The server is controlled by the yarn.web-proxy.address config. If that config is set, and it points to an address that is different from the RM web interface, then a separate proxy server needs to be launched.
+
+This can be done by running 
+
+yarn-daemon.sh start proxyserver
+
+If a separate proxy server is needed and security is enabled, the following configs may also need to be set:
+yarn.web-proxy.principal
+yarn.web-proxy.keytab
+
 The proxy server is stateless and should be able to support a VIP or other load balancing sitting in front of multiple instances of this server.</blockquote></li>
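
Putting the pieces above together, a hedged yarn-site.xml sketch for a
standalone, secured proxy; the host, principal and keytab path are all
placeholders:

  <property>
    <name>yarn.web-proxy.address</name>
    <!-- must differ from the RM web address to get a separate daemon -->
    <value>proxy.example.com:9099</value>
  </property>
  <property>
    <name>yarn.web-proxy.principal</name>
    <value>wp/_HOST@EXAMPLE.COM</value> <!-- placeholder principal -->
  </property>
  <property>
    <name>yarn.web-proxy.keytab</name>
    <value>/etc/security/keytabs/wp.keytab</value> <!-- placeholder path -->
  </property>

The daemon is then started with the yarn-daemon.sh command quoted above.
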
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2854">MAPREDUCE-2854</a>.
      Major bug reported by Thomas Graves and fixed by Thomas Graves <br>
@@ -8061,12 +8061,12 @@ mapreduce.reduce.shuffle.catch.exception
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2037">MAPREDUCE-2037</a>.
      Major new feature reported by Dick King and fixed by Dick King <br>
      <b>Capturing interim progress times, CPU usage, and memory usage, when tasks reach certain progress thresholds</b><br>
-     <blockquote>Capture intermediate task resource consumption information:
-* Time taken so far
-* CPU load [either at the time the data are taken, or exponentially smoothed]
-* Memory load [also either at the time the data are taken, or exponentially smoothed]
-
-This would be taken at intervals that depend on the task progress plateaus. For example, reducers have three progress ranges - [0-1/3], (1/3-2/3], and (2/3-3/3] - where fundamentally different activities happen. Mappers have different boundaries that are not symmetrically placed [0-9/10], (9/10-1]. Data capture boundaries should coincide with activity boundaries. For the state information capture [CPU and memory] we should average over the covered interval.
+     <blockquote>Capture intermediate task resource consumption information:
+* Time taken so far
+* CPU load [either at the time the data are taken, or exponentially smoothed]
+* Memory load [also either at the time the data are taken, or exponentially smoothed]
+
+This would be taken at intervals that depend on the task progress plateaus. For example, reducers have three progress ranges - [0-1/3], (1/3-2/3], and (2/3-3/3] - where fundamentally different activities happen. Mappers have different boundaries that are not symmetrically placed [0-9/10], (9/10-1]. Data capture boundaries should coincide with activity boundaries. For the state information capture [CPU and memory] we should average over the covered interval.
 </blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2033">MAPREDUCE-2033</a>.
      Major task reported by Vinay Kumar Thota and fixed by Vinay Kumar Thota (contrib/gridmix)<br>
@@ -8175,24 +8175,24 @@ This would be taken at intervals that de
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-279">MAPREDUCE-279</a>.
      Major improvement reported by Arun C Murthy and fixed by  (mrv2)<br>
      <b>Map-Reduce 2.0</b><br>
-     <blockquote>MapReduce has undergone a complete re-haul in hadoop-0.23 and we now have, what we call, MapReduce 2.0 (MRv2).
-
-The fundamental idea of MRv2 is to split up the two major functionalities of the JobTracker, resource management and job scheduling/monitoring, into separate daemons. The idea is to have a global ResourceManager (RM) and per-application ApplicationMaster (AM).  An application is either a single job in the classical sense of Map-Reduce jobs or a DAG of jobs. The ResourceManager and per-node slave, the NodeManager (NM), form the data-computation framework. The ResourceManager is the ultimate authority that arbitrates resources among all the applications in the system. The per-application ApplicationMaster is, in effect, a framework specific library and is tasked with negotiating resources from the ResourceManager and working with the NodeManager(s) to execute and monitor the tasks.
-
-The ResourceManager has two main components:
-* Scheduler (S)
-* ApplicationsManager (ASM)
-
-The Scheduler is responsible for allocating resources to the various running applications subject to familiar constraints of capacities, queues etc. The Scheduler is pure scheduler in the sense that it performs no monitoring or tracking of status for the application. Also, it offers no guarantees on restarting failed tasks either due to application failure or hardware failures. The Scheduler performs its scheduling function based the resource requirements of the applications; it does so based on the abstract notion of a Resource Container which incorporates elements such as memory, cpu, disk, network etc. 
-
-The Scheduler has a pluggable policy plug-in, which is responsible for partitioning the cluster resources among the various queues, applications etc. The current Map-Reduce schedulers such as the CapacityScheduler and the FairScheduler would be some examples of the plug-in.
-
-The CapacityScheduler supports hierarchical queues to allow for more predictable sharing of cluster resources.
-The ApplicationsManager is responsible for accepting job-submissions, negotiating the first container for executing the application specific ApplicationMaster and provides the service for restarting the ApplicationMaster container on failure.
-
-The NodeManager is the per-machine framework agent who is responsible for launching the applications' containers, monitoring their resource usage (cpu, memory, disk, network) and reporting the same to the Scheduler.
-
-The per-application ApplicationMaster has the responsibility of negotiating appropriate resource containers from the Scheduler, tracking their status and monitoring for progress.
+     <blockquote>MapReduce has undergone a complete overhaul in hadoop-0.23, and we now have what we call MapReduce 2.0 (MRv2).
+
+The fundamental idea of MRv2 is to split up the two major functionalities of the JobTracker, resource management and job scheduling/monitoring, into separate daemons. The idea is to have a global ResourceManager (RM) and per-application ApplicationMaster (AM).  An application is either a single job in the classical sense of Map-Reduce jobs or a DAG of jobs. The ResourceManager and per-node slave, the NodeManager (NM), form the data-computation framework. The ResourceManager is the ultimate authority that arbitrates resources among all the applications in the system. The per-application ApplicationMaster is, in effect, a framework specific library and is tasked with negotiating resources from the ResourceManager and working with the NodeManager(s) to execute and monitor the tasks.
+
+The ResourceManager has two main components:
+* Scheduler (S)
+* ApplicationsManager (ASM)
+
+The Scheduler is responsible for allocating resources to the various running applications subject to familiar constraints of capacities, queues etc. The Scheduler is a pure scheduler in the sense that it performs no monitoring or tracking of status for the application. Also, it offers no guarantees on restarting failed tasks, whether due to application failure or hardware failures. The Scheduler performs its scheduling function based on the resource requirements of the applications; it does so based on the abstract notion of a Resource Container which incorporates elements such as memory, cpu, disk, network etc.
+
+The Scheduler has a pluggable policy plug-in, which is responsible for partitioning the cluster resources among the various queues, applications etc. The current Map-Reduce schedulers such as the CapacityScheduler and the FairScheduler would be some examples of the plug-in.
+
+The CapacityScheduler supports hierarchical queues to allow for more predictable sharing of cluster resources.
+The ApplicationsManager is responsible for accepting job submissions, negotiating the first container for executing the application-specific ApplicationMaster, and providing the service for restarting the ApplicationMaster container on failure.
+
+The NodeManager is the per-machine framework agent that is responsible for launching the applications' containers, monitoring their resource usage (cpu, memory, disk, network) and reporting the same to the Scheduler.
+
+The per-application ApplicationMaster has the responsibility of negotiating appropriate resource containers from the Scheduler, tracking their status and monitoring for progress.
 </blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/HDFS-2540">HDFS-2540</a>.
      Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE <br>
@@ -8253,10 +8253,10 @@ The per-application ApplicationMaster ha
 <li> <a href="https://issues.apache.org/jira/browse/HDFS-2465">HDFS-2465</a>.
      Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (data-node , performance)<br>
      <b>Add HDFS support for fadvise readahead and drop-behind</b><br>
-     <blockquote>HDFS now has the ability to use posix_fadvise and sync_data_range syscalls to manage the OS buffer cache. This support is currently considered experimental, and may be enabled by configuring the following keys:
-dfs.datanode.drop.cache.behind.writes - set to true to drop data out of the buffer cache after writing
-dfs.datanode.drop.cache.behind.reads - set to true to drop data out of the buffer cache when performing sequential reads
-dfs.datanode.sync.behind.writes - set to true to trigger dirty page writeback immediately after writing data
+     <blockquote>HDFS now has the ability to use the posix_fadvise and sync_file_range syscalls to manage the OS buffer cache. This support is currently considered experimental, and may be enabled by configuring the following keys:
+dfs.datanode.drop.cache.behind.writes - set to true to drop data out of the buffer cache after writing
+dfs.datanode.drop.cache.behind.reads - set to true to drop data out of the buffer cache when performing sequential reads
+dfs.datanode.sync.behind.writes - set to true to trigger dirty page writeback immediately after writing data
 dfs.datanode.readahead.bytes - set to a non-zero value to trigger readahead for sequential reads</blockquote></li>
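
Since the support is experimental, here is a sketch that enables all four
keys on a test cluster (the readahead size is an arbitrary example value):

  <property>
    <name>dfs.datanode.drop.cache.behind.writes</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.datanode.drop.cache.behind.reads</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.datanode.sync.behind.writes</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.datanode.readahead.bytes</name>
    <value>4194304</value> <!-- 4 MB readahead; any non-zero value enables it -->
  </property>
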
 <li> <a href="https://issues.apache.org/jira/browse/HDFS-2453">HDFS-2453</a>.
      Major sub-task reported by Arpit Gupta and fixed by Tsz Wo (Nicholas), SZE (webhdfs)<br>
@@ -9331,7 +9331,7 @@ This is an incompatible change in 0.23. 
 <li> <a href="https://issues.apache.org/jira/browse/HDFS-1594">HDFS-1594</a>.
      Major bug reported by Devaraj K and fixed by Aaron T. Myers (name-node)<br>
      <b>When the disk becomes full Namenode is getting shutdown and not able to recover</b><br>
-     <blockquote>Implemented a daemon thread to monitor the disk usage for periodically and if the disk usage reaches the threshold value, put the name node into Safe mode so that no modification to file system will occur. Once the disk usage reaches below the threshold, name node will be put out of the safe mode. Here threshold value and interval to check the disk usage are configurable. 
+     <blockquote>Implemented a daemon thread to monitor the disk usage periodically; if the disk usage reaches the threshold value, the name node is put into safe mode so that no modifications to the file system will occur. Once the disk usage drops below the threshold, the name node is taken out of safe mode. Both the threshold value and the interval to check the disk usage are configurable.
 </blockquote></li>
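
The note above does not name the configuration keys. Assuming they are the
namenode resource-checker settings found in later hdfs-default.xml files,
namely dfs.namenode.resource.du.reserved and dfs.namenode.resource.check.interval
(an assumption to verify against your release), a sketch would be:

  <property>
    <name>dfs.namenode.resource.du.reserved</name>
    <!-- assumed key: free space (bytes) below which the NN enters safe
         mode; 100 MB shown as an example -->
    <value>104857600</value>
  </property>
  <property>
    <name>dfs.namenode.resource.check.interval</name>
    <!-- assumed key: check period in milliseconds; example value -->
    <value>5000</value>
  </property>
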
 <li> <a href="https://issues.apache.org/jira/browse/HDFS-1592">HDFS-1592</a>.
      Major bug reported by Bharath Mundlapudi and fixed by Bharath Mundlapudi <br>
@@ -9376,9 +9376,9 @@ This is an incompatible change in 0.23. 
 <li> <a href="https://issues.apache.org/jira/browse/HDFS-1547">HDFS-1547</a>.
      Major improvement reported by Suresh Srinivas and fixed by Suresh Srinivas (name-node)<br>
      <b>Improve decommission mechanism</b><br>
-     <blockquote>Summary of changes to the decommissioning process:
-# After nodes are decommissioned, they are not shutdown. The decommissioned nodes are not used for writes. For reads, the decommissioned nodes are given as the last location to read from.
-# Number of live and dead decommissioned nodes are displayed in the namenode webUI.
+     <blockquote>Summary of changes to the decommissioning process:
+# After nodes are decommissioned, they are not shut down. The decommissioned nodes are not used for writes. For reads, the decommissioned nodes are given as the last location to read from.
+# Number of live and dead decommissioned nodes are displayed in the namenode webUI.
 # Decommissioned nodes' free capacity is not counted towards the cluster free capacity.</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/HDFS-1541">HDFS-1541</a>.
      Major sub-task reported by Hairong Kuang and fixed by Hairong Kuang (name-node)<br>
@@ -9491,10 +9491,10 @@ This is an incompatible change in 0.23. 
 <li> <a href="https://issues.apache.org/jira/browse/HDFS-1448">HDFS-1448</a>.
      Major new feature reported by Erik Steffl and fixed by Erik Steffl (tools)<br>
      <b>Create multi-format parser for edits logs file, support binary and XML formats initially</b><br>
-     <blockquote>Offline edits viewer feature adds oev tool to hdfs script. Oev makes it possible to convert edits logs to/from native binary and XML formats. It uses the same framework as Offline image viewer.
-
-Example usage:
-
+     <blockquote>The offline edits viewer feature adds an oev tool to the hdfs script. oev makes it possible to convert edits logs to/from the native binary and XML formats. It uses the same framework as the offline image viewer.
+
+Example usage:
+
 $HADOOP_HOME/bin/hdfs oev -i edits -o output.xml</blockquote></li>
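
For example, the XML output can be edited by hand and converted back to the
native binary format. This assumes oev's -p flag selects the output
processor (binary/xml/stats), as in later releases:

  $HADOOP_HOME/bin/hdfs oev -i edits -o output.xml
  # assumed flag: -p picks the output processor
  $HADOOP_HOME/bin/hdfs oev -i output.xml -o edits.new -p binary
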
 <li> <a href="https://issues.apache.org/jira/browse/HDFS-1445">HDFS-1445</a>.
      Major sub-task reported by Matt Foley and fixed by Matt Foley (data-node)<br>
@@ -9762,7 +9762,7 @@ This change requires an upgrade at deplo
 <li> <a href="https://issues.apache.org/jira/browse/HADOOP-7681">HADOOP-7681</a>.
      Minor bug reported by Arpit Gupta and fixed by Arpit Gupta (conf)<br>
      <b>log4j.properties is missing properties for security audit and hdfs audit should be changed to info</b><br>
-     <blockquote>HADOOP-7681. Fixed security and hdfs audit log4j properties
+     <blockquote>HADOOP-7681. Fixed security and hdfs audit log4j properties
 (Arpit Gupta via Eric Yang)</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/HADOOP-7671">HADOOP-7671</a>.
      Major bug reported by Ravi Prakash and fixed by Ravi Prakash <br>
@@ -10363,8 +10363,8 @@ This change requires an upgrade at deplo
 <li> <a href="https://issues.apache.org/jira/browse/HADOOP-7227">HADOOP-7227</a>.
      Major improvement reported by Jitendra Nath Pandey and fixed by Jitendra Nath Pandey (ipc)<br>
      <b>Remove protocol version check at proxy creation in Hadoop RPC.</b><br>
-     <blockquote>1. Protocol version check is removed from proxy creation, instead version check is performed at server in every rpc call.
-2. This change is backward incompatible because format of the rpc messages is changed to include client version, client method hash and rpc version.
+     <blockquote>1. The protocol version check is removed from proxy creation; instead, the version check is performed at the server in every rpc call.
+2. This change is backward incompatible because the format of the rpc messages is changed to include the client version, client method hash and rpc version.
 3. An rpc version is introduced, which should change whenever the format of rpc messages is changed.</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/HADOOP-7223">HADOOP-7223</a>.
      Major bug reported by Suresh Srinivas and fixed by Suresh Srinivas (fs)<br>

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-common-project/hadoop-common/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java:r1397381-1401062