Posted to common-commits@hadoop.apache.org by su...@apache.org on 2012/10/12 06:35:44 UTC

svn commit: r1397432 [1/3] - in /hadoop/common/trunk/hadoop-common-project/hadoop-common: ./ src/main/docs/ src/main/java/org/apache/hadoop/metrics/ src/main/java/org/apache/hadoop/metrics/file/ src/main/java/org/apache/hadoop/metrics/spi/ src/main/jav...

Author: suresh
Date: Fri Oct 12 04:35:42 2012
New Revision: 1397432

URL: http://svn.apache.org/viewvc?rev=1397432&view=rev
Log:
HADOOP-8911. CRLF characters in source and text files. Contributed by Raja Aluri.
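
A minimal sketch, not part of this commit, of the normalization the patch applies: each affected file is rewritten with Unix-style LF line endings in place of Windows-style CRLF. The class name and argument handling below are illustrative only.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

/** Illustrative sketch: rewrites a text file so CRLF ("\r\n") endings become LF ("\n"). */
public class CrlfToLf {
  public static void main(String[] args) throws IOException {
    Path file = Paths.get(args[0]);
    // Read the whole file as UTF-8 text.
    String text = new String(Files.readAllBytes(file), StandardCharsets.UTF_8);
    // Replace Windows-style CRLF endings with Unix-style LF and write the file back in place.
    Files.write(file, text.replace("\r\n", "\n").getBytes(StandardCharsets.UTF_8));
  }
}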

Modified:
    hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ContextFactory.java
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsContext.java
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsException.java
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsRecord.java
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/file/FileContext.java
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/AbstractMetricsContext.java
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/MetricsRecordImpl.java
    hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DataChecksum.java
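
One way to enumerate candidate files like the list above is to walk the source tree and flag any file whose bytes contain a carriage return; the sketch below is an assumption about how such an audit could be done, not how HADOOP-8911 was actually prepared.

import java.io.IOException;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;

/** Illustrative sketch: prints every file under a root whose contents include a CR byte (0x0D). */
public class FindCrlfFiles {
  public static void main(String[] args) throws IOException {
    Files.walkFileTree(Paths.get(args[0]), new SimpleFileVisitor<Path>() {
      @Override
      public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
        for (byte b : Files.readAllBytes(file)) {
          if (b == '\r') {          // a CR byte suggests CRLF line endings
            System.out.println(file);
            break;
          }
        }
        return FileVisitResult.CONTINUE;
      }
    });
  }
}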

Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1397432&r1=1397431&r2=1397432&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt Fri Oct 12 04:35:42 2012
@@ -306,6 +306,9 @@ Release 2.0.3-alpha - Unreleased 
     HADOOP-8909. Hadoop Common Maven protoc calls must not depend on external
     sh script. (Chris Nauroth via suresh)
 
+    HADOOP-8911. CRLF characters in source and text files.
+    (Raja Aluri via suresh)
+
   OPTIMIZATIONS
 
     HADOOP-8866. SampleQuantiles#query is O(N^2) instead of O(N). (Andrew Wang

Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html?rev=1397432&r1=1397431&r2=1397432&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html (original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html Fri Oct 12 04:35:42 2012
@@ -15,8 +15,8 @@ These release notes include new develope
 <li> <a href="https://issues.apache.org/jira/browse/YARN-137">YARN-137</a>.
      Major improvement reported by Siddharth Seth and fixed by Siddharth Seth (scheduler)<br>
      <b>Change the default scheduler to the CapacityScheduler</b><br>
-     <blockquote>There's some bugs in the FifoScheduler atm - doesn't distribute tasks across nodes and some headroom (available resource) issues.
-That's not the best experience for users trying out the 2.0 branch. The CS with the default configuration of a single queue behaves the same as the FifoScheduler and doesn't have these issues.
+     <blockquote>There's some bugs in the FifoScheduler atm - doesn't distribute tasks across nodes and some headroom (available resource) issues.
+That's not the best experience for users trying out the 2.0 branch. The CS with the default configuration of a single queue behaves the same as the FifoScheduler and doesn't have these issues.
 </blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/YARN-108">YARN-108</a>.
      Critical bug reported by Jason Lowe and fixed by Jason Lowe (nodemanager)<br>
@@ -45,73 +45,73 @@ That's not the best experience for users
 <li> <a href="https://issues.apache.org/jira/browse/YARN-79">YARN-79</a>.
      Major bug reported by Bikas Saha and fixed by Vinod Kumar Vavilapalli (client)<br>
      <b>Calling YarnClientImpl.close throws Exception</b><br>
-     <blockquote>The following exception is thrown
-===========
-*org.apache.hadoop.HadoopIllegalArgumentException: Cannot close proxy - is not Closeable or does not provide closeable invocation handler class org.apache.hadoop.yarn.api.impl.pb.client.ClientRMProtocolPBClientImpl*
-	*at org.apache.hadoop.ipc.RPC.stopProxy(RPC.java:624)*
-	*at org.hadoop.yarn.client.YarnClientImpl.stop(YarnClientImpl.java:102)*
-	at org.apache.hadoop.yarn.applications.unmanagedamlauncher.UnmanagedAMLauncher.run(UnmanagedAMLauncher.java:336)
-	at org.apache.hadoop.yarn.applications.unmanagedamlauncher.TestUnmanagedAMLauncher.testDSShell(TestUnmanagedAMLauncher.java:156)
-	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
-	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
-	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
-	at java.lang.reflect.Method.invoke(Method.java:597)
-	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
-	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
-	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
-	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:20)
-	at org.junit.runners.BlockJUnit4ClassRunner.runNotIgnored(BlockJUnit4ClassRunner.java:79)
-	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:71)
-	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:49)
-	at org.junit.runners.ParentRunner$3.run(ParentRunner.java:193)
-	at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:52)
-	at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:191)
-	at org.junit.runners.ParentRunner.access$000(ParentRunner.java:42)
-	at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:184)
-	at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:28)
-	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:31)
-	at org.junit.runners.ParentRunner.run(ParentRunner.java:236)
-	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:236)
-	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:134)
-	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:113)
-	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
-	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
-	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
-	at java.lang.reflect.Method.invoke(Method.java:597)
-	at org.apache.maven.surefire.util.ReflectionUtils.invokeMethodWithArray(ReflectionUtils.java:189)
-	at org.apache.maven.surefire.booter.ProviderFactory$ProviderProxy.invoke(ProviderFactory.java:165)
-	at org.apache.maven.surefire.booter.ProviderFactory.invokeProvider(ProviderFactory.java:85)
-	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:103)
-	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:74)
+     <blockquote>The following exception is thrown
+===========
+*org.apache.hadoop.HadoopIllegalArgumentException: Cannot close proxy - is not Closeable or does not provide closeable invocation handler class org.apache.hadoop.yarn.api.impl.pb.client.ClientRMProtocolPBClientImpl*
+	*at org.apache.hadoop.ipc.RPC.stopProxy(RPC.java:624)*
+	*at org.hadoop.yarn.client.YarnClientImpl.stop(YarnClientImpl.java:102)*
+	at org.apache.hadoop.yarn.applications.unmanagedamlauncher.UnmanagedAMLauncher.run(UnmanagedAMLauncher.java:336)
+	at org.apache.hadoop.yarn.applications.unmanagedamlauncher.TestUnmanagedAMLauncher.testDSShell(TestUnmanagedAMLauncher.java:156)
+	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
+	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
+	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
+	at java.lang.reflect.Method.invoke(Method.java:597)
+	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)
+	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)
+	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)
+	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:20)
+	at org.junit.runners.BlockJUnit4ClassRunner.runNotIgnored(BlockJUnit4ClassRunner.java:79)
+	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:71)
+	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:49)
+	at org.junit.runners.ParentRunner$3.run(ParentRunner.java:193)
+	at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:52)
+	at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:191)
+	at org.junit.runners.ParentRunner.access$000(ParentRunner.java:42)
+	at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:184)
+	at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:28)
+	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:31)
+	at org.junit.runners.ParentRunner.run(ParentRunner.java:236)
+	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:236)
+	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:134)
+	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:113)
+	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
+	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
+	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
+	at java.lang.reflect.Method.invoke(Method.java:597)
+	at org.apache.maven.surefire.util.ReflectionUtils.invokeMethodWithArray(ReflectionUtils.java:189)
+	at org.apache.maven.surefire.booter.ProviderFactory$ProviderProxy.invoke(ProviderFactory.java:165)
+	at org.apache.maven.surefire.booter.ProviderFactory.invokeProvider(ProviderFactory.java:85)
+	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:103)
+	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:74)
 ===========</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/YARN-75">YARN-75</a>.
      Major bug reported by Siddharth Seth and fixed by Siddharth Seth <br>
      <b>RMContainer should handle a RELEASE event while RUNNING</b><br>
-     <blockquote>An AppMaster can send a container release at any point. Currently this results in an exception, if this is done while the RM considers the container to be RUNNING.
+     <blockquote>An AppMaster can send a container release at any point. Currently this results in an exception, if this is done while the RM considers the container to be RUNNING.
 The event not being processed correctly also implies that these containers do not show up in the Completed Container List seen by the AM (AMRMProtocol). MR-3902 depends on this set being complete. </blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/YARN-68">YARN-68</a>.
      Major bug reported by patrick white and fixed by Daryn Sharp (nodemanager)<br>
      <b>NodeManager will refuse to shutdown indefinitely due to container log aggregation</b><br>
-     <blockquote>The nodemanager is able to get into a state where containermanager.logaggregation.AppLogAggregatorImpl will apparently wait
-indefinitely for log aggregation to complete for an application, even if that application has abnormally terminated and is no longer present. 
-
-Observed behavior is that an attempt to stop the nodemanager daemon will return but have no effect, the nm log continually displays messages similar to this:
-
-[Thread-1]2012-08-21 17:44:07,581 INFO
-org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.AppLogAggregatorImpl:
-Waiting for aggregation to complete for application_1345221477405_2733
-
-The only recovery we found to work was to 'kill -9' the nm process.
-
-What exactly causes the NM to enter this state is unclear but we do see this behavior reliably when the NM has run a task which failed, for example when debugging oozie distcp actions and having a distcp map task fail, the NM that was running the container will now enter this state where a shutdown on said NM will never complete, 'never' in this case was waiting for 2 hours before killing the nodemanager process.
+     <blockquote>The nodemanager is able to get into a state where containermanager.logaggregation.AppLogAggregatorImpl will apparently wait
+indefinitely for log aggregation to complete for an application, even if that application has abnormally terminated and is no longer present. 
+
+Observed behavior is that an attempt to stop the nodemanager daemon will return but have no effect, the nm log continually displays messages similar to this:
+
+[Thread-1]2012-08-21 17:44:07,581 INFO
+org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.AppLogAggregatorImpl:
+Waiting for aggregation to complete for application_1345221477405_2733
+
+The only recovery we found to work was to 'kill -9' the nm process.
+
+What exactly causes the NM to enter this state is unclear but we do see this behavior reliably when the NM has run a task which failed, for example when debugging oozie distcp actions and having a distcp map task fail, the NM that was running the container will now enter this state where a shutdown on said NM will never complete, 'never' in this case was waiting for 2 hours before killing the nodemanager process.
 </blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/YARN-66">YARN-66</a>.
      Critical bug reported by Thomas Graves and fixed by Thomas Graves (nodemanager)<br>
      <b>aggregated logs permissions not set properly</b><br>
-     <blockquote>If the default file permissions are set to something restrictive - like 700, application logs get aggregated and created with those restrictive file permissions which doesn't allow the history server to serve them up.
-
-
-They need to be created with group readable similar to how log aggregation sets up the directory permissions.
+     <blockquote>If the default file permissions are set to something restrictive - like 700, application logs get aggregated and created with those restrictive file permissions which doesn't allow the history server to serve them up.
+
+
+They need to be created with group readable similar to how log aggregation sets up the directory permissions.
 </blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/YARN-63">YARN-63</a>.
      Major bug reported by Jason Lowe and fixed by Jason Lowe (resourcemanager)<br>
@@ -128,47 +128,47 @@ They need to be created with group reada
 <li> <a href="https://issues.apache.org/jira/browse/YARN-42">YARN-42</a>.
      Major bug reported by Devaraj K and fixed by Devaraj K (nodemanager)<br>
      <b>Node Manager throws NPE on startup</b><br>
-     <blockquote>NM throws NPE on startup if it doesn't have persmission's on nm local dir's
-
-
-{code:xml}
-2012-05-14 16:32:13,468 FATAL org.apache.hadoop.yarn.server.nodemanager.NodeManager: Error starting NodeManager
-org.apache.hadoop.yarn.YarnException: Failed to initialize LocalizationService
-	at org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService.init(ResourceLocalizationService.java:202)
-	at org.apache.hadoop.yarn.service.CompositeService.init(CompositeService.java:58)
-	at org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl.init(ContainerManagerImpl.java:183)
-	at org.apache.hadoop.yarn.service.CompositeService.init(CompositeService.java:58)
-	at org.apache.hadoop.yarn.server.nodemanager.NodeManager.init(NodeManager.java:166)
-	at org.apache.hadoop.yarn.server.nodemanager.NodeManager.initAndStartNodeManager(NodeManager.java:268)
-	at org.apache.hadoop.yarn.server.nodemanager.NodeManager.main(NodeManager.java:284)
-Caused by: java.io.IOException: mkdir of /mrv2/tmp/nm-local-dir/usercache failed
-	at org.apache.hadoop.fs.FileSystem.primitiveMkdir(FileSystem.java:907)
-	at org.apache.hadoop.fs.DelegateToFileSystem.mkdir(DelegateToFileSystem.java:143)
-	at org.apache.hadoop.fs.FilterFs.mkdir(FilterFs.java:189)
-	at org.apache.hadoop.fs.FileContext$4.next(FileContext.java:706)
-	at org.apache.hadoop.fs.FileContext$4.next(FileContext.java:703)
-	at org.apache.hadoop.fs.FileContext$FSLinkResolver.resolve(FileContext.java:2325)
-	at org.apache.hadoop.fs.FileContext.mkdir(FileContext.java:703)
-	at org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService.init(ResourceLocalizationService.java:188)
-	... 6 more
-2012-05-14 16:32:13,472 INFO org.apache.hadoop.yarn.service.CompositeService: Error stopping org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.NonAggregatingLogHandler
-java.lang.NullPointerException
-	at org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.NonAggregatingLogHandler.stop(NonAggregatingLogHandler.java:82)
-	at org.apache.hadoop.yarn.service.CompositeService.stop(CompositeService.java:99)
-	at org.apache.hadoop.yarn.service.CompositeService.stop(CompositeService.java:89)
-	at org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl.stop(ContainerManagerImpl.java:266)
-	at org.apache.hadoop.yarn.service.CompositeService.stop(CompositeService.java:99)
-	at org.apache.hadoop.yarn.service.CompositeService.stop(CompositeService.java:89)
-	at org.apache.hadoop.yarn.server.nodemanager.NodeManager.stop(NodeManager.java:182)
-	at org.apache.hadoop.yarn.service.CompositeService$CompositeServiceShutdownHook.run(CompositeService.java:122)
-	at org.apache.hadoop.util.ShutdownHookManager$1.run(ShutdownHookManager.java:54)
-{code}
+     <blockquote>NM throws NPE on startup if it doesn't have persmission's on nm local dir's
+
+
+{code:xml}
+2012-05-14 16:32:13,468 FATAL org.apache.hadoop.yarn.server.nodemanager.NodeManager: Error starting NodeManager
+org.apache.hadoop.yarn.YarnException: Failed to initialize LocalizationService
+	at org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService.init(ResourceLocalizationService.java:202)
+	at org.apache.hadoop.yarn.service.CompositeService.init(CompositeService.java:58)
+	at org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl.init(ContainerManagerImpl.java:183)
+	at org.apache.hadoop.yarn.service.CompositeService.init(CompositeService.java:58)
+	at org.apache.hadoop.yarn.server.nodemanager.NodeManager.init(NodeManager.java:166)
+	at org.apache.hadoop.yarn.server.nodemanager.NodeManager.initAndStartNodeManager(NodeManager.java:268)
+	at org.apache.hadoop.yarn.server.nodemanager.NodeManager.main(NodeManager.java:284)
+Caused by: java.io.IOException: mkdir of /mrv2/tmp/nm-local-dir/usercache failed
+	at org.apache.hadoop.fs.FileSystem.primitiveMkdir(FileSystem.java:907)
+	at org.apache.hadoop.fs.DelegateToFileSystem.mkdir(DelegateToFileSystem.java:143)
+	at org.apache.hadoop.fs.FilterFs.mkdir(FilterFs.java:189)
+	at org.apache.hadoop.fs.FileContext$4.next(FileContext.java:706)
+	at org.apache.hadoop.fs.FileContext$4.next(FileContext.java:703)
+	at org.apache.hadoop.fs.FileContext$FSLinkResolver.resolve(FileContext.java:2325)
+	at org.apache.hadoop.fs.FileContext.mkdir(FileContext.java:703)
+	at org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService.init(ResourceLocalizationService.java:188)
+	... 6 more
+2012-05-14 16:32:13,472 INFO org.apache.hadoop.yarn.service.CompositeService: Error stopping org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.NonAggregatingLogHandler
+java.lang.NullPointerException
+	at org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.NonAggregatingLogHandler.stop(NonAggregatingLogHandler.java:82)
+	at org.apache.hadoop.yarn.service.CompositeService.stop(CompositeService.java:99)
+	at org.apache.hadoop.yarn.service.CompositeService.stop(CompositeService.java:89)
+	at org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl.stop(ContainerManagerImpl.java:266)
+	at org.apache.hadoop.yarn.service.CompositeService.stop(CompositeService.java:99)
+	at org.apache.hadoop.yarn.service.CompositeService.stop(CompositeService.java:89)
+	at org.apache.hadoop.yarn.server.nodemanager.NodeManager.stop(NodeManager.java:182)
+	at org.apache.hadoop.yarn.service.CompositeService$CompositeServiceShutdownHook.run(CompositeService.java:122)
+	at org.apache.hadoop.util.ShutdownHookManager$1.run(ShutdownHookManager.java:54)
+{code}
 </blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/YARN-39">YARN-39</a>.
      Critical sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli <br>
      <b>RM-NM secret-keys should be randomly generated and rolled every so often</b><br>
-     <blockquote> - RM should generate the master-key randomly
- - The master-key should roll every so often
+     <blockquote> - RM should generate the master-key randomly
+ - The master-key should roll every so often
  - NM should remember old expired keys so that already doled out container-requests can be satisfied.</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/YARN-37">YARN-37</a>.
      Minor bug reported by Jason Lowe and fixed by Mayank Bansal (resourcemanager)<br>
@@ -177,42 +177,42 @@ java.lang.NullPointerException
 <li> <a href="https://issues.apache.org/jira/browse/YARN-36">YARN-36</a>.
      Blocker bug reported by Eli Collins and fixed by Radim Kolar <br>
      <b>branch-2.1.0-alpha doesn't build</b><br>
-     <blockquote>branch-2.1.0-alpha doesn't build due to the following. Per YARN-1 I updated the mvn version to be 2.1.0-SNAPSHOT, before I hit this issue it didn't compile due to the bogus version. 
-
-{noformat}
-hadoop-branch-2.1.0-alpha $ mvn compile
-[INFO] Scanning for projects...
-[ERROR] The build could not read 1 project -&gt; [Help 1]
-[ERROR]   
-[ERROR]   The project org.apache.hadoop:hadoop-yarn-project:2.1.0-SNAPSHOT (/home/eli/src/hadoop-branch-2.1.0-alpha/hadoop-yarn-project/pom.xml) has 1 error
-[ERROR]     'dependencies.dependency.version' for org.hsqldb:hsqldb:jar is missing. @ line 160, column 17
+     <blockquote>branch-2.1.0-alpha doesn't build due to the following. Per YARN-1 I updated the mvn version to be 2.1.0-SNAPSHOT, before I hit this issue it didn't compile due to the bogus version. 
+
+{noformat}
+hadoop-branch-2.1.0-alpha $ mvn compile
+[INFO] Scanning for projects...
+[ERROR] The build could not read 1 project -&gt; [Help 1]
+[ERROR]   
+[ERROR]   The project org.apache.hadoop:hadoop-yarn-project:2.1.0-SNAPSHOT (/home/eli/src/hadoop-branch-2.1.0-alpha/hadoop-yarn-project/pom.xml) has 1 error
+[ERROR]     'dependencies.dependency.version' for org.hsqldb:hsqldb:jar is missing. @ line 160, column 17
 {noformat}</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/YARN-31">YARN-31</a>.
      Major bug reported by Thomas Graves and fixed by Thomas Graves <br>
      <b>TestDelegationTokenRenewer fails on jdk7</b><br>
-     <blockquote>TestDelegationTokenRenewer fails when run with jdk7.  
-
+     <blockquote>TestDelegationTokenRenewer fails when run with jdk7.  
+
 With JDK7, test methods run in an undefined order. Here it is expecting that testDTRenewal runs first but it no longer is.</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/YARN-29">YARN-29</a>.
      Major bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (client)<br>
      <b>Add a yarn-client module</b><br>
-     <blockquote>I see that we are duplicating (some) code for talking to RM via client API. In this light, a yarn-client module will be useful so that clients of all frameworks can use/extend it.
-
+     <blockquote>I see that we are duplicating (some) code for talking to RM via client API. In this light, a yarn-client module will be useful so that clients of all frameworks can use/extend it.
+
 And that same module can be the destination for all the YARN's command line tools.</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/YARN-27">YARN-27</a>.
      Major bug reported by Ramya Sunil and fixed by Arun C Murthy <br>
      <b>Failed refreshQueues due to misconfiguration prevents further refreshing of queues</b><br>
-     <blockquote>Stumbled upon this problem while refreshing queues with incorrect configuration. The exact scenario was:
-1. Added a new queue "newQueue" without defining its capacity.
-2. "bin/mapred queue -refreshQueues" fails correctly with "Illegal capacity of -1 for queue root.newQueue"
-3. However, after defining the capacity of "newQueue" followed by a second "bin/mapred queue -refreshQueues" throws "org.apache.hadoop.metrics2.MetricsException: Metrics source QueueMetrics,q0=root,q1=newQueue already exists!" Also see Hadoop:name=QueueMetrics,q0=root,q1=newQueue,service=ResourceManager metrics being available even though the queue was not added.
-
+     <blockquote>Stumbled upon this problem while refreshing queues with incorrect configuration. The exact scenario was:
+1. Added a new queue "newQueue" without defining its capacity.
+2. "bin/mapred queue -refreshQueues" fails correctly with "Illegal capacity of -1 for queue root.newQueue"
+3. However, after defining the capacity of "newQueue" followed by a second "bin/mapred queue -refreshQueues" throws "org.apache.hadoop.metrics2.MetricsException: Metrics source QueueMetrics,q0=root,q1=newQueue already exists!" Also see Hadoop:name=QueueMetrics,q0=root,q1=newQueue,service=ResourceManager metrics being available even though the queue was not added.
+
 The expected behavior would be to refresh the queues correctly and allow addition of "newQueue". </blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/YARN-25">YARN-25</a>.
      Major bug reported by Thomas Graves and fixed by Robert Joseph Evans <br>
      <b>remove old aggregated logs</b><br>
-     <blockquote>Currently the aggregated user logs under NM_REMOTE_APP_LOG_DIR are never removed.  We should have mechanism to remove them after certain period.
-
+     <blockquote>Currently the aggregated user logs under NM_REMOTE_APP_LOG_DIR are never removed.  We should have mechanism to remove them after certain period.
+
 It might make sense for job history server to remove them.</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/YARN-22">YARN-22</a>.
      Minor bug reported by Eli Collins and fixed by Mayank Bansal <br>
@@ -221,29 +221,29 @@ It might make sense for job history serv
 <li> <a href="https://issues.apache.org/jira/browse/YARN-15">YARN-15</a>.
      Critical bug reported by Alejandro Abdelnur and fixed by Arun C Murthy (nodemanager)<br>
      <b>YarnConfiguration DEFAULT_YARN_APPLICATION_CLASSPATH should be updated</b><br>
-     <blockquote>
-{code}
-  /**
-   * Default CLASSPATH for YARN applications. A comma-separated list of
-   * CLASSPATH entries
-   */
-  public static final String[] DEFAULT_YARN_APPLICATION_CLASSPATH = {
-      "$HADOOP_CONF_DIR", "$HADOOP_COMMON_HOME/share/hadoop/common/*",
-      "$HADOOP_COMMON_HOME/share/hadoop/common/lib/*",
-      "$HADOOP_HDFS_HOME/share/hadoop/hdfs/*",
-      "$HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/*",
-      "$YARN_HOME/share/hadoop/mapreduce/*",
-      "$YARN_HOME/share/hadoop/mapreduce/lib/*"};
-{code}
-
+     <blockquote>
+{code}
+  /**
+   * Default CLASSPATH for YARN applications. A comma-separated list of
+   * CLASSPATH entries
+   */
+  public static final String[] DEFAULT_YARN_APPLICATION_CLASSPATH = {
+      "$HADOOP_CONF_DIR", "$HADOOP_COMMON_HOME/share/hadoop/common/*",
+      "$HADOOP_COMMON_HOME/share/hadoop/common/lib/*",
+      "$HADOOP_HDFS_HOME/share/hadoop/hdfs/*",
+      "$HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/*",
+      "$YARN_HOME/share/hadoop/mapreduce/*",
+      "$YARN_HOME/share/hadoop/mapreduce/lib/*"};
+{code}
+
 It should have {{share/yarn/}} and MR should add the {{share/mapreduce/}} (another JIRA?)</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/YARN-14">YARN-14</a>.
      Major bug reported by Jason Lowe and fixed by Jason Lowe (nodemanager)<br>
      <b>Symlinks to peer distributed cache files no longer work</b><br>
-     <blockquote>Trying to create a symlink to another file that is specified for the distributed cache will fail to create the link.  For example:
-
-hadoop jar ... -files "x,y,x#z"
-
+     <blockquote>Trying to create a symlink to another file that is specified for the distributed cache will fail to create the link.  For example:
+
+hadoop jar ... -files "x,y,x#z"
+
 will localize the files x and y as x and y, but the z symlink for x will not be created.  This is a regression from 1.x behavior.</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/YARN-13">YARN-13</a>.
      Critical bug reported by Todd Lipcon and fixed by  <br>
@@ -252,13 +252,13 @@ will localize the files x and y as x and
 <li> <a href="https://issues.apache.org/jira/browse/YARN-12">YARN-12</a>.
      Major bug reported by Junping Du and fixed by Junping Du (scheduler)<br>
      <b>Several Findbugs issues with new FairScheduler in YARN</b><br>
-     <blockquote>The good feature of FairScheduler is added recently to YARN. As recently PreCommit test from MAPREDUCE-4309, there are several bugs found by Findbugs related to FairScheduler:
-org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerEventLog.shutdown() might ignore java.lang.Exception
-Inconsistent synchronization of org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerEventLog.logDisabled; locked 50% of time
-Inconsistent synchronization of org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueueManager.queueMaxAppsDefault; locked 50% of time
-Inconsistent synchronization of org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueueManager.userMaxAppsDefault; locked 50% of time
-The details are in:https://builds.apache.org/job/PreCommit-MAPREDUCE-Build/2612//artifact/trunk/patchprocess/newPatchFindbugsWarningshadoop-yarn-server-resourcemanager.html#DE_MIGHT_IGNORE 
-
+     <blockquote>The good feature of FairScheduler is added recently to YARN. As recently PreCommit test from MAPREDUCE-4309, there are several bugs found by Findbugs related to FairScheduler:
+org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerEventLog.shutdown() might ignore java.lang.Exception
+Inconsistent synchronization of org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerEventLog.logDisabled; locked 50% of time
+Inconsistent synchronization of org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueueManager.queueMaxAppsDefault; locked 50% of time
+Inconsistent synchronization of org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueueManager.userMaxAppsDefault; locked 50% of time
+The details are in:https://builds.apache.org/job/PreCommit-MAPREDUCE-Build/2612//artifact/trunk/patchprocess/newPatchFindbugsWarningshadoop-yarn-server-resourcemanager.html#DE_MIGHT_IGNORE 
+
 </blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/YARN-10">YARN-10</a>.
      Major improvement reported by Arun C Murthy and fixed by Hitesh Shah <br>
@@ -991,18 +991,18 @@ The details are in:https://builds.apache
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3812">MAPREDUCE-3812</a>.
      Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Harsh J (mrv2 , performance)<br>
      <b>Lower default allocation sizes, fix allocation configurations and document them</b><br>
-     <blockquote>Removes two sets of previously available config properties:
-
-1. ( yarn.scheduler.fifo.minimum-allocation-mb and yarn.scheduler.fifo.maximum-allocation-mb ) and,
-2. ( yarn.scheduler.capacity.minimum-allocation-mb and yarn.scheduler.capacity.maximum-allocation-mb )
-
-In favor of two new, generically named properties:
-
-1. yarn.scheduler.minimum-allocation-mb - This acts as the floor value of memory resource requests for containers.
-2. yarn.scheduler.maximum-allocation-mb - This acts as the ceiling value of memory resource requests for containers.
-
-Both these properties need to be set at the ResourceManager (RM) to take effect, as the RM is where the scheduler resides.
-
+     <blockquote>Removes two sets of previously available config properties:
+
+1. ( yarn.scheduler.fifo.minimum-allocation-mb and yarn.scheduler.fifo.maximum-allocation-mb ) and,
+2. ( yarn.scheduler.capacity.minimum-allocation-mb and yarn.scheduler.capacity.maximum-allocation-mb )
+
+In favor of two new, generically named properties:
+
+1. yarn.scheduler.minimum-allocation-mb - This acts as the floor value of memory resource requests for containers.
+2. yarn.scheduler.maximum-allocation-mb - This acts as the ceiling value of memory resource requests for containers.
+
+Both these properties need to be set at the ResourceManager (RM) to take effect, as the RM is where the scheduler resides.
+
 Also changes the default minimum and maximums to 128 MB and 10 GB respectively.</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3782">MAPREDUCE-3782</a>.
      Critical bug reported by Arpit Gupta and fixed by Jason Lowe (mrv2)<br>
@@ -1043,8 +1043,8 @@ Also changes the default minimum and max
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3543">MAPREDUCE-3543</a>.
      Critical bug reported by Mahadev konar and fixed by Thomas Graves (mrv2)<br>
      <b>Mavenize Gridmix.</b><br>
-     <blockquote>Note that to apply this you should first run the script - ./MAPREDUCE-3543v3.sh svn, then apply the patch.
-
+     <blockquote>Note that to apply this you should first run the script - ./MAPREDUCE-3543v3.sh svn, then apply the patch.
+
 If this is merged to more then trunk, the version inside of hadoop-tools/hadoop-gridmix/pom.xml will need to be udpated accordingly.</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3506">MAPREDUCE-3506</a>.
      Minor bug reported by Ratandeep Ratti and fixed by Jason Lowe (client , mrv2)<br>
@@ -1613,10 +1613,10 @@ If this is merged to more then trunk, th
 <li> <a href="https://issues.apache.org/jira/browse/HDFS-3475">HDFS-3475</a>.
      Trivial improvement reported by Harsh J and fixed by Harsh J <br>
      <b>Make the replication and invalidation rates configurable</b><br>
-     <blockquote>This change adds two new configuration parameters. 
-# {{dfs.namenode.invalidate.work.pct.per.iteration}} for controlling deletion rate of blocks. 
-# {{dfs.namenode.replication.work.multiplier.per.iteration}} for controlling replication rate. This in turn allows controlling the time it takes for decommissioning. 
-
+     <blockquote>This change adds two new configuration parameters. 
+# {{dfs.namenode.invalidate.work.pct.per.iteration}} for controlling deletion rate of blocks. 
+# {{dfs.namenode.replication.work.multiplier.per.iteration}} for controlling replication rate. This in turn allows controlling the time it takes for decommissioning. 
+
 Please see hdfs-default.xml for detailed description.</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/HDFS-3474">HDFS-3474</a>.
      Major sub-task reported by Ivan Kelly and fixed by Ivan Kelly <br>
@@ -4769,8 +4769,8 @@ These release notes include new develope
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3720">MAPREDUCE-3720</a>.
      Major bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (client , mrv2)<br>
      <b>Command line listJobs should not visit each AM</b><br>
-     <blockquote>Changed bin/mapred job -list to not print job-specific information not available at RM.
-
+     <blockquote>Changed bin/mapred job -list to not print job-specific information not available at RM.
+
 Very minor incompatibility in cmd-line output, inevitable due to MRv2 architecture.</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3718">MAPREDUCE-3718</a>.
      Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Hitesh Shah (mrv2 , performance)<br>
@@ -4819,8 +4819,8 @@ Very minor incompatibility in cmd-line o
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3703">MAPREDUCE-3703</a>.
      Critical bug reported by Eric Payne and fixed by Eric Payne (mrv2 , resourcemanager)<br>
      <b>ResourceManager should provide node lists in JMX output</b><br>
-     <blockquote>New JMX Bean in ResourceManager to provide list of live node managers:
-
+     <blockquote>New JMX Bean in ResourceManager to provide list of live node managers:
+
 Hadoop:service=ResourceManager,name=RMNMInfo LiveNodeManagers</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3702">MAPREDUCE-3702</a>.
      Critical bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
@@ -5037,12 +5037,12 @@ Hadoop:service=ResourceManager,name=RMNM
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3549">MAPREDUCE-3549</a>.
      Blocker bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
      <b>write api documentation for web service apis for RM, NM, mapreduce app master, and job history server</b><br>
-     <blockquote>new files added: A      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/WebServicesIntro.apt.vm
-A      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/NodeManagerRest.apt.vm
-A      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm
-A      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/MapredAppMasterRest.apt.vm
-A      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HistoryServerRest.apt.vm
-
+     <blockquote>new files added: A      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/WebServicesIntro.apt.vm
+A      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/NodeManagerRest.apt.vm
+A      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm
+A      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/MapredAppMasterRest.apt.vm
+A      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HistoryServerRest.apt.vm
+
 The hadoop-project/src/site/site.xml is split into separate patch.</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3548">MAPREDUCE-3548</a>.
      Critical sub-task reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
@@ -5471,7 +5471,7 @@ The hadoop-project/src/site/site.xml is 
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3297">MAPREDUCE-3297</a>.
      Major task reported by Siddharth Seth and fixed by Siddharth Seth (mrv2)<br>
      <b>Move Log Related components from yarn-server-nodemanager to yarn-common</b><br>
-     <blockquote>Moved log related components into yarn-common so that HistoryServer and clients can use them without depending on the yarn-server-nodemanager module.
+     <blockquote>Moved log related components into yarn-common so that HistoryServer and clients can use them without depending on the yarn-server-nodemanager module.
 </blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3291">MAPREDUCE-3291</a>.
      Blocker bug reported by Ramya Sunil and fixed by Robert Joseph Evans (mrv2)<br>
@@ -5504,17 +5504,17 @@ The hadoop-project/src/site/site.xml is 
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3219">MAPREDUCE-3219</a>.
      Minor sub-task reported by Hitesh Shah and fixed by Hitesh Shah (mrv2 , test)<br>
      <b>ant test TestDelegationToken failing on trunk</b><br>
-     <blockquote>Reenabled and fixed bugs in the failing test TestDelegationToken.
+     <blockquote>Reenabled and fixed bugs in the failing test TestDelegationToken.
 </blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3217">MAPREDUCE-3217</a>.
      Minor sub-task reported by Hitesh Shah and fixed by Devaraj K (mrv2 , test)<br>
      <b>ant test TestAuditLogger fails on trunk</b><br>
-     <blockquote>Reenabled and fixed bugs in the failing ant test TestAuditLogger.
+     <blockquote>Reenabled and fixed bugs in the failing ant test TestAuditLogger.
 </blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3215">MAPREDUCE-3215</a>.
      Minor sub-task reported by Hitesh Shah and fixed by Hitesh Shah (mrv2)<br>
      <b>org.apache.hadoop.mapreduce.TestNoJobSetupCleanup failing on trunk</b><br>
-     <blockquote>Reneabled and fixed bugs in the failing test TestNoJobSetupCleanup.
+     <blockquote>Reneabled and fixed bugs in the failing test TestNoJobSetupCleanup.
 </blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3194">MAPREDUCE-3194</a>.
      Major bug reported by Siddharth Seth and fixed by Jason Lowe (mrv2)<br>
@@ -5875,12 +5875,12 @@ The hadoop-project/src/site/site.xml is 
 <li> <a href="https://issues.apache.org/jira/browse/HDFS-2246">HDFS-2246</a>.
      Major improvement reported by Sanjay Radia and fixed by Jitendra Nath Pandey <br>
      <b>Shortcut a local client reads to a Datanodes files directly</b><br>
-     <blockquote>1. New configurations
-a. dfs.block.local-path-access.user is the key in datanode configuration to specify the user allowed to do short circuit read.
-b. dfs.client.read.shortcircuit is the key to enable short circuit read at the client side configuration.
-c. dfs.client.read.shortcircuit.skip.checksum is the key to bypass checksum check at the client side.
-2. By default none of the above are enabled and short circuit read will not kick in.
-3. If security is on, the feature can be used only for user that has kerberos credentials at the client, therefore map reduce tasks cannot benefit from it in general.
+     <blockquote>1. New configurations
+a. dfs.block.local-path-access.user is the key in datanode configuration to specify the user allowed to do short circuit read.
+b. dfs.client.read.shortcircuit is the key to enable short circuit read at the client side configuration.
+c. dfs.client.read.shortcircuit.skip.checksum is the key to bypass checksum check at the client side.
+2. By default none of the above are enabled and short circuit read will not kick in.
+3. If security is on, the feature can be used only for user that has kerberos credentials at the client, therefore map reduce tasks cannot benefit from it in general.
 </blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/HDFS-2178">HDFS-2178</a>.
      Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur <br>
@@ -6161,7 +6161,7 @@ c. dfs.client.read.shortcircuit.skip.che
 <li> <a href="https://issues.apache.org/jira/browse/HADOOP-7802">HADOOP-7802</a>.
      Major bug reported by Bruno Mah&#233; and fixed by Bruno Mah&#233; <br>
      <b>Hadoop scripts unconditionally source "$bin"/../libexec/hadoop-config.sh.</b><br>
-     <blockquote>Here is a patch to enable this behavior
+     <blockquote>Here is a patch to enable this behavior
 </blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/HADOOP-7801">HADOOP-7801</a>.
      Major bug reported by Bruno Mah&#233; and fixed by Bruno Mah&#233; (build)<br>
@@ -6486,9 +6486,9 @@ These release notes include new develope
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3186">MAPREDUCE-3186</a>.
      Blocker bug reported by Ramgopal N and fixed by Eric Payne (mrv2)<br>
      <b>User jobs are getting hanged if the Resource manager process goes down and comes up while job is getting executed.</b><br>
-     <blockquote>New Yarn configuration property:
-
-Name: yarn.app.mapreduce.am.scheduler.connection.retries
+     <blockquote>New Yarn configuration property:
+
+Name: yarn.app.mapreduce.am.scheduler.connection.retries
 Description: Number of times AM should retry to contact RM if connection is lost.</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3185">MAPREDUCE-3185</a>.
      Critical bug reported by Mahadev konar and fixed by Jonathan Eagles (mrv2)<br>
@@ -6641,7 +6641,7 @@ Description: Number of times AM should r
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3112">MAPREDUCE-3112</a>.
      Major bug reported by Eric Yang and fixed by Eric Yang (contrib/streaming)<br>
      <b>Calling hadoop cli inside mapreduce job leads to errors</b><br>
-     <blockquote>Removed inheritance of certain server environment variables (HADOOP_OPTS and HADOOP_ROOT_LOGGER) in task attempt process.
+     <blockquote>Removed inheritance of certain server environment variables (HADOOP_OPTS and HADOOP_ROOT_LOGGER) in task attempt process.
 </blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3110">MAPREDUCE-3110</a>.
      Major bug reported by Devaraj K and fixed by Vinod Kumar Vavilapalli (mrv2 , test)<br>
@@ -7114,16 +7114,16 @@ Description: Number of times AM should r
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2858">MAPREDUCE-2858</a>.
      Blocker sub-task reported by Luke Lu and fixed by Robert Joseph Evans (applicationmaster , mrv2 , security)<br>
      <b>MRv2 WebApp Security</b><br>
-     <blockquote>A new server has been added to yarn.  It is a web proxy that sits in front of the AM web UI.  The server is controlled by the yarn.web-proxy.address config.  If that config is set, and it points to an address that is different then the RM web interface then a separate proxy server needs to be launched.
-
-This can be done by running 
-
-yarn-daemon.sh start proxyserver
-
-If a separate proxy server is needed other configs also may need to be set, if security is enabled.
-yarn.web-proxy.principal
-yarn.web-proxy.keytab
-
+     <blockquote>A new server has been added to yarn.  It is a web proxy that sits in front of the AM web UI.  The server is controlled by the yarn.web-proxy.address config.  If that config is set, and it points to an address that is different then the RM web interface then a separate proxy server needs to be launched.
+
+This can be done by running 
+
+yarn-daemon.sh start proxyserver
+
+If a separate proxy server is needed other configs also may need to be set, if security is enabled.
+yarn.web-proxy.principal
+yarn.web-proxy.keytab
+
 The proxy server is stateless and should be able to support a VIP or other load balancing sitting in front of multiple instances of this server.</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2854">MAPREDUCE-2854</a>.
      Major bug reported by Thomas Graves and fixed by Thomas Graves <br>
@@ -8061,12 +8061,12 @@ mapreduce.reduce.shuffle.catch.exception
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2037">MAPREDUCE-2037</a>.
      Major new feature reported by Dick King and fixed by Dick King <br>
      <b>Capturing interim progress times, CPU usage, and memory usage, when tasks reach certain progress thresholds</b><br>
-     <blockquote>Capture intermediate task resource consumption information:
-* Time taken so far
-* CPU load [either at the time the data are taken, or exponentially smoothed]
-* Memory load [also either at the time the data are taken, or exponentially smoothed]
-
-This would be taken at intervals that depend on the task progress plateaus. For example, reducers have three progress ranges - [0-1/3], (1/3-2/3], and (2/3-3/3] - where fundamentally different activities happen. Mappers have different boundaries that are not symmetrically placed [0-9/10], (9/10-1]. Data capture boundaries should coincide with activity boundaries. For the state information capture [CPU and memory] we should average over the covered interval.
+     <blockquote>Capture intermediate task resource consumption information:
+* Time taken so far
+* CPU load [either at the time the data are taken, or exponentially smoothed]
+* Memory load [also either at the time the data are taken, or exponentially smoothed]
+
+This would be taken at intervals that depend on the task progress plateaus. For example, reducers have three progress ranges - [0-1/3], (1/3-2/3], and (2/3-3/3] - where fundamentally different activities happen. Mappers have different boundaries that are not symmetrically placed [0-9/10], (9/10-1]. Data capture boundaries should coincide with activity boundaries. For the state information capture [CPU and memory] we should average over the covered interval.
 </blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2033">MAPREDUCE-2033</a>.
      Major task reported by Vinay Kumar Thota and fixed by Vinay Kumar Thota (contrib/gridmix)<br>
@@ -8175,24 +8175,24 @@ This would be taken at intervals that de
 <li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-279">MAPREDUCE-279</a>.
      Major improvement reported by Arun C Murthy and fixed by  (mrv2)<br>
      <b>Map-Reduce 2.0</b><br>
-     <blockquote>MapReduce has undergone a complete re-haul in hadoop-0.23 and we now have, what we call, MapReduce 2.0 (MRv2).
-
-The fundamental idea of MRv2 is to split up the two major functionalities of the JobTracker, resource management and job scheduling/monitoring, into separate daemons. The idea is to have a global ResourceManager (RM) and per-application ApplicationMaster (AM).  An application is either a single job in the classical sense of Map-Reduce jobs or a DAG of jobs. The ResourceManager and per-node slave, the NodeManager (NM), form the data-computation framework. The ResourceManager is the ultimate authority that arbitrates resources among all the applications in the system. The per-application ApplicationMaster is, in effect, a framework specific library and is tasked with negotiating resources from the ResourceManager and working with the NodeManager(s) to execute and monitor the tasks.
-
-The ResourceManager has two main components:
-* Scheduler (S)
-* ApplicationsManager (ASM)
-
-The Scheduler is responsible for allocating resources to the various running applications subject to familiar constraints of capacities, queues etc. The Scheduler is pure scheduler in the sense that it performs no monitoring or tracking of status for the application. Also, it offers no guarantees on restarting failed tasks either due to application failure or hardware failures. The Scheduler performs its scheduling function based the resource requirements of the applications; it does so based on the abstract notion of a Resource Container which incorporates elements such as memory, cpu, disk, network etc. 
-
-The Scheduler has a pluggable policy plug-in, which is responsible for partitioning the cluster resources among the various queues, applications etc. The current Map-Reduce schedulers such as the CapacityScheduler and the FairScheduler would be some examples of the plug-in.
-
-The CapacityScheduler supports hierarchical queues to allow for more predictable sharing of cluster resources.
-The ApplicationsManager is responsible for accepting job-submissions, negotiating the first container for executing the application specific ApplicationMaster and provides the service for restarting the ApplicationMaster container on failure.
-
-The NodeManager is the per-machine framework agent who is responsible for launching the applications' containers, monitoring their resource usage (cpu, memory, disk, network) and reporting the same to the Scheduler.
-
-The per-application ApplicationMaster has the responsibility of negotiating appropriate resource containers from the Scheduler, tracking their status and monitoring for progress.
+     <blockquote>MapReduce has undergone a complete re-haul in hadoop-0.23 and we now have, what we call, MapReduce 2.0 (MRv2).
+
+The fundamental idea of MRv2 is to split up the two major functionalities of the JobTracker, resource management and job scheduling/monitoring, into separate daemons. The idea is to have a global ResourceManager (RM) and per-application ApplicationMaster (AM).  An application is either a single job in the classical sense of Map-Reduce jobs or a DAG of jobs. The ResourceManager and per-node slave, the NodeManager (NM), form the data-computation framework. The ResourceManager is the ultimate authority that arbitrates resources among all the applications in the system. The per-application ApplicationMaster is, in effect, a framework specific library and is tasked with negotiating resources from the ResourceManager and working with the NodeManager(s) to execute and monitor the tasks.
+
+The ResourceManager has two main components:
+* Scheduler (S)
+* ApplicationsManager (ASM)
+
+The Scheduler is responsible for allocating resources to the various running applications subject to familiar constraints of capacities, queues etc. The Scheduler is pure scheduler in the sense that it performs no monitoring or tracking of status for the application. Also, it offers no guarantees on restarting failed tasks either due to application failure or hardware failures. The Scheduler performs its scheduling function based the resource requirements of the applications; it does so based on the abstract notion of a Resource Container which incorporates elements such as memory, cpu, disk, network etc. 
+
+The Scheduler has a pluggable policy plug-in, which is responsible for partitioning the cluster resources among the various queues, applications etc. The current Map-Reduce schedulers such as the CapacityScheduler and the FairScheduler would be some examples of the plug-in.
+
+The CapacityScheduler supports hierarchical queues to allow for more predictable sharing of cluster resources.
+The ApplicationsManager is responsible for accepting job-submissions, negotiating the first container for executing the application specific ApplicationMaster and provides the service for restarting the ApplicationMaster container on failure.
+
+The NodeManager is the per-machine framework agent who is responsible for launching the applications' containers, monitoring their resource usage (cpu, memory, disk, network) and reporting the same to the Scheduler.
+
+The per-application ApplicationMaster has the responsibility of negotiating appropriate resource containers from the Scheduler, tracking their status and monitoring for progress.
 </blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/HDFS-2540">HDFS-2540</a>.
      Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE <br>
@@ -8253,10 +8253,10 @@ The per-application ApplicationMaster ha
 <li> <a href="https://issues.apache.org/jira/browse/HDFS-2465">HDFS-2465</a>.
      Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (data-node , performance)<br>
      <b>Add HDFS support for fadvise readahead and drop-behind</b><br>
-     <blockquote>HDFS now has the ability to use posix_fadvise and sync_data_range syscalls to manage the OS buffer cache. This support is currently considered experimental, and may be enabled by configuring the following keys:
-dfs.datanode.drop.cache.behind.writes - set to true to drop data out of the buffer cache after writing
-dfs.datanode.drop.cache.behind.reads - set to true to drop data out of the buffer cache when performing sequential reads
-dfs.datanode.sync.behind.writes - set to true to trigger dirty page writeback immediately after writing data
+     <blockquote>HDFS now has the ability to use posix_fadvise and sync_data_range syscalls to manage the OS buffer cache. This support is currently considered experimental, and may be enabled by configuring the following keys:
+dfs.datanode.drop.cache.behind.writes - set to true to drop data out of the buffer cache after writing
+dfs.datanode.drop.cache.behind.reads - set to true to drop data out of the buffer cache when performing sequential reads
+dfs.datanode.sync.behind.writes - set to true to trigger dirty page writeback immediately after writing data
 dfs.datanode.readahead.bytes - set to a non-zero value to trigger readahead for sequential reads</blockquote></li>
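For illustration, the four keys in the HDFS-2465 note above can be set programmatically as well as in hdfs-site.xml on the datanodes; a minimal sketch using org.apache.hadoop.conf.Configuration, with the key names taken verbatim from the note (the readahead size chosen here is arbitrary):

    import org.apache.hadoop.conf.Configuration;

    public class FadviseSettings {
      public static Configuration experimentalCacheManagement() {
        Configuration conf = new Configuration();
        // Drop data out of the OS buffer cache after writing.
        conf.setBoolean("dfs.datanode.drop.cache.behind.writes", true);
        // Drop data out of the buffer cache after sequential reads.
        conf.setBoolean("dfs.datanode.drop.cache.behind.reads", true);
        // Trigger dirty page writeback immediately after writing data.
        conf.setBoolean("dfs.datanode.sync.behind.writes", true);
        // Any non-zero value enables readahead for sequential reads.
        conf.setLong("dfs.datanode.readahead.bytes", 4L * 1024 * 1024);
        return conf;
      }
    }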
 <li> <a href="https://issues.apache.org/jira/browse/HDFS-2453">HDFS-2453</a>.
      Major sub-task reported by Arpit Gupta and fixed by Tsz Wo (Nicholas), SZE (webhdfs)<br>
@@ -9331,7 +9331,7 @@ This is an incompatible change in 0.23. 
 <li> <a href="https://issues.apache.org/jira/browse/HDFS-1594">HDFS-1594</a>.
      Major bug reported by Devaraj K and fixed by Aaron T. Myers (name-node)<br>
      <b>When the disk becomes full Namenode is getting shutdown and not able to recover</b><br>
-     <blockquote>Implemented a daemon thread to monitor the disk usage for periodically and if the disk usage reaches the threshold value, put the name node into Safe mode so that no modification to file system will occur. Once the disk usage reaches below the threshold, name node will be put out of the safe mode. Here threshold value and interval to check the disk usage are configurable. 
+     <blockquote>Implemented a daemon thread to monitor the disk usage periodically; if the disk usage reaches the threshold value, the name node is put into safe mode so that no modification to the file system will occur. Once the disk usage falls below the threshold, the name node is taken out of safe mode. Both the threshold value and the interval at which to check the disk usage are configurable.
 </blockquote></li>
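The HDFS-1594 note says the threshold and check interval are configurable but does not name the keys; a sketch under the assumption that they are dfs.namenode.resource.du.reserved and dfs.namenode.resource.check.interval:

    import org.apache.hadoop.conf.Configuration;

    public class NameNodeDiskMonitorSettings {
      public static Configuration diskMonitoring() {
        Configuration conf = new Configuration();
        // Assumed key: free space (bytes) to reserve before entering safe mode.
        conf.setLong("dfs.namenode.resource.du.reserved", 1024L * 1024 * 1024);
        // Assumed key: how often (ms) the daemon thread checks disk usage.
        conf.setLong("dfs.namenode.resource.check.interval", 10000L);
        return conf;
      }
    }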
 <li> <a href="https://issues.apache.org/jira/browse/HDFS-1592">HDFS-1592</a>.
      Major bug reported by Bharath Mundlapudi and fixed by Bharath Mundlapudi <br>
@@ -9376,9 +9376,9 @@ This is an incompatible change in 0.23. 
 <li> <a href="https://issues.apache.org/jira/browse/HDFS-1547">HDFS-1547</a>.
      Major improvement reported by Suresh Srinivas and fixed by Suresh Srinivas (name-node)<br>
      <b>Improve decommission mechanism</b><br>
-     <blockquote>Summary of changes to the decommissioning process:
-# After nodes are decommissioned, they are not shutdown. The decommissioned nodes are not used for writes. For reads, the decommissioned nodes are given as the last location to read from.
-# Number of live and dead decommissioned nodes are displayed in the namenode webUI.
+     <blockquote>Summary of changes to the decommissioning process:
+# After nodes are decommissioned, they are not shut down. The decommissioned nodes are not used for writes. For reads, the decommissioned nodes are given as the last location to read from.
+# The number of live and dead decommissioned nodes is displayed in the namenode webUI.
 # Decommissioned nodes' free capacity is not counted towards the cluster free capacity.</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/HDFS-1541">HDFS-1541</a>.
      Major sub-task reported by Hairong Kuang and fixed by Hairong Kuang (name-node)<br>
@@ -9491,10 +9491,10 @@ This is an incompatible change in 0.23. 
 <li> <a href="https://issues.apache.org/jira/browse/HDFS-1448">HDFS-1448</a>.
      Major new feature reported by Erik Steffl and fixed by Erik Steffl (tools)<br>
      <b>Create multi-format parser for edits logs file, support binary and XML formats initially</b><br>
-     <blockquote>Offline edits viewer feature adds oev tool to hdfs script. Oev makes it possible to convert edits logs to/from native binary and XML formats. It uses the same framework as Offline image viewer.
-
-Example usage:
-
+     <blockquote>The Offline edits viewer feature adds an oev tool to the hdfs script. oev makes it possible to convert edits logs to/from the native binary and XML formats. It uses the same framework as the Offline image viewer.
+
+Example usage:
+
 $HADOOP_HOME/bin/hdfs oev -i edits -o output.xml</blockquote></li>
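Since oev converts in both directions, a converted XML file can be taken back to the binary format; a sketch, assuming the tool's -p option selects the output processor:

    $HADOOP_HOME/bin/hdfs oev -i output.xml -o edits.binary -p binary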
 <li> <a href="https://issues.apache.org/jira/browse/HDFS-1445">HDFS-1445</a>.
      Major sub-task reported by Matt Foley and fixed by Matt Foley (data-node)<br>
@@ -9762,7 +9762,7 @@ This change requires an upgrade at deplo
 <li> <a href="https://issues.apache.org/jira/browse/HADOOP-7681">HADOOP-7681</a>.
      Minor bug reported by Arpit Gupta and fixed by Arpit Gupta (conf)<br>
      <b>log4j.properties is missing properties for security audit and hdfs audit should be changed to info</b><br>
-     <blockquote>HADOOP-7681. Fixed security and hdfs audit log4j properties
+     <blockquote>HADOOP-7681. Fixed security and hdfs audit log4j properties
 (Arpit Gupta via Eric Yang)</blockquote></li>
 <li> <a href="https://issues.apache.org/jira/browse/HADOOP-7671">HADOOP-7671</a>.
      Major bug reported by Ravi Prakash and fixed by Ravi Prakash <br>
@@ -10363,8 +10363,8 @@ This change requires an upgrade at deplo
 <li> <a href="https://issues.apache.org/jira/browse/HADOOP-7227">HADOOP-7227</a>.
      Major improvement reported by Jitendra Nath Pandey and fixed by Jitendra Nath Pandey (ipc)<br>
      <b>Remove protocol version check at proxy creation in Hadoop RPC.</b><br>
-     <blockquote>1. Protocol version check is removed from proxy creation, instead version check is performed at server in every rpc call.
-2. This change is backward incompatible because format of the rpc messages is changed to include client version, client method hash and rpc version.
+     <blockquote>1. The protocol version check is removed from proxy creation; instead, the version check is performed at the server on every rpc call.
+2. This change is backward incompatible because the format of the rpc messages is changed to include the client version, the client method hash, and the rpc version.
 3. An rpc version is introduced, which should change whenever the format of the rpc messages changes.</blockquote></li>
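A minimal sketch of the per-call check described in point 1 above; the types and fields are illustrative stand-ins, not the actual Hadoop RPC wire format:

    // Illustrative only: each request carries version info that the server
    // validates on every call, rather than once at proxy creation.
    class VersionedRpcRequest {
      final long clientVersion;    // protocol version the client was built against
      final int clientMethodHash;  // hash of the invoked method's signature
      final int rpcVersion;        // version of the rpc message format itself

      VersionedRpcRequest(long clientVersion, int clientMethodHash, int rpcVersion) {
        this.clientVersion = clientVersion;
        this.clientMethodHash = clientMethodHash;
        this.rpcVersion = rpcVersion;
      }
    }

    class VersionCheckingServer {
      static final int SERVER_RPC_VERSION = 1;
      final long serverProtocolVersion;

      VersionCheckingServer(long serverProtocolVersion) {
        this.serverProtocolVersion = serverProtocolVersion;
      }

      void checkVersion(VersionedRpcRequest req) {
        // Reject calls whose message format or protocol version disagrees.
        if (req.rpcVersion != SERVER_RPC_VERSION
            || req.clientVersion != serverProtocolVersion) {
          throw new IllegalStateException("rpc version mismatch");
        }
      }
    }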
 <li> <a href="https://issues.apache.org/jira/browse/HADOOP-7223">HADOOP-7223</a>.
      Major bug reported by Suresh Srinivas and fixed by Suresh Srinivas (fs)<br>

Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ContextFactory.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ContextFactory.java?rev=1397432&r1=1397431&r2=1397432&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ContextFactory.java (original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ContextFactory.java Fri Oct 12 04:35:42 2012
@@ -1,211 +1,211 @@
-/*
- * ContextFactory.java
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.metrics;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Properties;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.metrics.spi.NullContext;
-
-/**
- * Factory class for creating MetricsContext objects.  To obtain an instance
- * of this class, use the static <code>getFactory()</code> method.
- */
-@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
-@InterfaceStability.Evolving
-public class ContextFactory {
-    
-  private static final String PROPERTIES_FILE = 
-    "/hadoop-metrics.properties";
-  private static final String CONTEXT_CLASS_SUFFIX =
-    ".class";
-  private static final String DEFAULT_CONTEXT_CLASSNAME =
-    "org.apache.hadoop.metrics.spi.NullContext";
-    
-  private static ContextFactory theFactory = null;
-    
-  private Map<String,Object> attributeMap = new HashMap<String,Object>();
-  private Map<String,MetricsContext> contextMap = 
-    new HashMap<String,MetricsContext>();
-    
-  // Used only when contexts, or the ContextFactory itself, cannot be
-  // created.
-  private static Map<String,MetricsContext> nullContextMap = 
-    new HashMap<String,MetricsContext>();
-    
-  /** Creates a new instance of ContextFactory */
-  protected ContextFactory() {
-  }
-    
-  /**
-   * Returns the value of the named attribute, or null if there is no 
-   * attribute of that name.
-   *
-   * @param attributeName the attribute name
-   * @return the attribute value
-   */
-  public Object getAttribute(String attributeName) {
-    return attributeMap.get(attributeName);
-  }
-    
-  /**
-   * Returns the names of all the factory's attributes.
-   * 
-   * @return the attribute names
-   */
-  public String[] getAttributeNames() {
-    String[] result = new String[attributeMap.size()];
-    int i = 0;
-    // for (String attributeName : attributeMap.keySet()) {
-    Iterator it = attributeMap.keySet().iterator();
-    while (it.hasNext()) {
-      result[i++] = (String) it.next();
-    }
-    return result;
-  }
-    
-  /**
-   * Sets the named factory attribute to the specified value, creating it
-   * if it did not already exist.  If the value is null, this is the same as
-   * calling removeAttribute.
-   *
-   * @param attributeName the attribute name
-   * @param value the new attribute value
-   */
-  public void setAttribute(String attributeName, Object value) {
-    attributeMap.put(attributeName, value);
-  }
-
-  /**
-   * Removes the named attribute if it exists.
-   *
-   * @param attributeName the attribute name
-   */
-  public void removeAttribute(String attributeName) {
-    attributeMap.remove(attributeName);
-  }
-    
-  /**
-   * Returns the named MetricsContext instance, constructing it if necessary 
-   * using the factory's current configuration attributes. <p/>
-   * 
-   * When constructing the instance, if the factory property 
-   * <i>contextName</i>.class</code> exists, 
-   * its value is taken to be the name of the class to instantiate.  Otherwise,
-   * the default is to create an instance of 
-   * <code>org.apache.hadoop.metrics.spi.NullContext</code>, which is a 
-   * dummy "no-op" context which will cause all metric data to be discarded.
-   * 
-   * @param contextName the name of the context
-   * @return the named MetricsContext
-   */
-  public synchronized MetricsContext getContext(String refName, String contextName)
-      throws IOException, ClassNotFoundException,
-             InstantiationException, IllegalAccessException {
-    MetricsContext metricsContext = contextMap.get(refName);
-    if (metricsContext == null) {
-      String classNameAttribute = refName + CONTEXT_CLASS_SUFFIX;
-      String className = (String) getAttribute(classNameAttribute);
-      if (className == null) {
-        className = DEFAULT_CONTEXT_CLASSNAME;
-      }
-      Class contextClass = Class.forName(className);
-      metricsContext = (MetricsContext) contextClass.newInstance();
-      metricsContext.init(contextName, this);
-      contextMap.put(contextName, metricsContext);
-    }
-    return metricsContext;
-  }
-
-  public synchronized MetricsContext getContext(String contextName)
-    throws IOException, ClassNotFoundException, InstantiationException,
-           IllegalAccessException {
-    return getContext(contextName, contextName);
-  }
-  
-  /** 
-   * Returns all MetricsContexts built by this factory.
-   */
-  public synchronized Collection<MetricsContext> getAllContexts() {
-    // Make a copy to avoid race conditions with creating new contexts.
-    return new ArrayList<MetricsContext>(contextMap.values());
-  }
-    
-  /**
-   * Returns a "null" context - one which does nothing.
-   */
-  public static synchronized MetricsContext getNullContext(String contextName) {
-    MetricsContext nullContext = nullContextMap.get(contextName);
-    if (nullContext == null) {
-      nullContext = new NullContext();
-      nullContextMap.put(contextName, nullContext);
-    }
-    return nullContext;
-  }
-    
-  /**
-   * Returns the singleton ContextFactory instance, constructing it if 
-   * necessary. <p/>
-   * 
-   * When the instance is constructed, this method checks if the file 
-   * <code>hadoop-metrics.properties</code> exists on the class path.  If it 
-   * exists, it must be in the format defined by java.util.Properties, and all 
-   * the properties in the file are set as attributes on the newly created
-   * ContextFactory instance.
-   *
-   * @return the singleton ContextFactory instance
-   */
-  public static synchronized ContextFactory getFactory() throws IOException {
-    if (theFactory == null) {
-      theFactory = new ContextFactory();
-      theFactory.setAttributes();
-    }
-    return theFactory;
-  }
-    
-  private void setAttributes() throws IOException {
-    InputStream is = getClass().getResourceAsStream(PROPERTIES_FILE);
-    if (is != null) {
-      try {
-        Properties properties = new Properties();
-        properties.load(is);
-        //for (Object propertyNameObj : properties.keySet()) {
-        Iterator it = properties.keySet().iterator();
-        while (it.hasNext()) {
-          String propertyName = (String) it.next();
-          String propertyValue = properties.getProperty(propertyName);
-          setAttribute(propertyName, propertyValue);
-        }
-      } finally {
-        is.close();
-      }
-    }
-  }
-    
-}
+/*
+ * ContextFactory.java
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.metrics.spi.NullContext;
+
+/**
+ * Factory class for creating MetricsContext objects.  To obtain an instance
+ * of this class, use the static <code>getFactory()</code> method.
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Evolving
+public class ContextFactory {
+    
+  private static final String PROPERTIES_FILE = 
+    "/hadoop-metrics.properties";
+  private static final String CONTEXT_CLASS_SUFFIX =
+    ".class";
+  private static final String DEFAULT_CONTEXT_CLASSNAME =
+    "org.apache.hadoop.metrics.spi.NullContext";
+    
+  private static ContextFactory theFactory = null;
+    
+  private Map<String,Object> attributeMap = new HashMap<String,Object>();
+  private Map<String,MetricsContext> contextMap = 
+    new HashMap<String,MetricsContext>();
+    
+  // Used only when contexts, or the ContextFactory itself, cannot be
+  // created.
+  private static Map<String,MetricsContext> nullContextMap = 
+    new HashMap<String,MetricsContext>();
+    
+  /** Creates a new instance of ContextFactory */
+  protected ContextFactory() {
+  }
+    
+  /**
+   * Returns the value of the named attribute, or null if there is no 
+   * attribute of that name.
+   *
+   * @param attributeName the attribute name
+   * @return the attribute value
+   */
+  public Object getAttribute(String attributeName) {
+    return attributeMap.get(attributeName);
+  }
+    
+  /**
+   * Returns the names of all the factory's attributes.
+   * 
+   * @return the attribute names
+   */
+  public String[] getAttributeNames() {
+    String[] result = new String[attributeMap.size()];
+    int i = 0;
+    for (String attributeName : attributeMap.keySet()) {
+      result[i++] = attributeName;
+    }
+    return result;
+  }
+    
+  /**
+   * Sets the named factory attribute to the specified value, creating it
+   * if it did not already exist.  If the value is null, this is the same as
+   * calling removeAttribute.
+   *
+   * @param attributeName the attribute name
+   * @param value the new attribute value
+   */
+  public void setAttribute(String attributeName, Object value) {
+    attributeMap.put(attributeName, value);
+  }
+
+  /**
+   * Removes the named attribute if it exists.
+   *
+   * @param attributeName the attribute name
+   */
+  public void removeAttribute(String attributeName) {
+    attributeMap.remove(attributeName);
+  }
+    
+  /**
+   * Returns the named MetricsContext instance, constructing it if necessary 
+   * using the factory's current configuration attributes. <p/>
+   * 
+   * When constructing the instance, if the factory property 
+   * <code><i>contextName</i>.class</code> exists, 
+   * its value is taken to be the name of the class to instantiate.  Otherwise,
+   * the default is to create an instance of 
+   * <code>org.apache.hadoop.metrics.spi.NullContext</code>, which is a 
+   * dummy "no-op" context which will cause all metric data to be discarded.
+   * 
+   * @param refName the name under which the constructed context is cached
+   * @param contextName the name of the context
+   * @return the named MetricsContext
+   */
+  public synchronized MetricsContext getContext(String refName, String contextName)
+      throws IOException, ClassNotFoundException,
+             InstantiationException, IllegalAccessException {
+    MetricsContext metricsContext = contextMap.get(refName);
+    if (metricsContext == null) {
+      String classNameAttribute = refName + CONTEXT_CLASS_SUFFIX;
+      String className = (String) getAttribute(classNameAttribute);
+      if (className == null) {
+        className = DEFAULT_CONTEXT_CLASSNAME;
+      }
+      Class<?> contextClass = Class.forName(className);
+      metricsContext = (MetricsContext) contextClass.newInstance();
+      metricsContext.init(contextName, this);
+      // Cache under refName so the lookup above, which uses refName, will
+      // find this instance on subsequent calls.
+      contextMap.put(refName, metricsContext);
+    }
+    return metricsContext;
+  }
+
+  public synchronized MetricsContext getContext(String contextName)
+    throws IOException, ClassNotFoundException, InstantiationException,
+           IllegalAccessException {
+    return getContext(contextName, contextName);
+  }
+  
+  /** 
+   * Returns all MetricsContexts built by this factory.
+   */
+  public synchronized Collection<MetricsContext> getAllContexts() {
+    // Make a copy to avoid race conditions with creating new contexts.
+    return new ArrayList<MetricsContext>(contextMap.values());
+  }
+    
+  /**
+   * Returns a "null" context - one which does nothing.
+   */
+  public static synchronized MetricsContext getNullContext(String contextName) {
+    MetricsContext nullContext = nullContextMap.get(contextName);
+    if (nullContext == null) {
+      nullContext = new NullContext();
+      nullContextMap.put(contextName, nullContext);
+    }
+    return nullContext;
+  }
+    
+  /**
+   * Returns the singleton ContextFactory instance, constructing it if 
+   * necessary. <p/>
+   * 
+   * When the instance is constructed, this method checks if the file 
+   * <code>hadoop-metrics.properties</code> exists on the class path.  If it 
+   * exists, it must be in the format defined by java.util.Properties, and all 
+   * the properties in the file are set as attributes on the newly created
+   * ContextFactory instance.
+   *
+   * @return the singleton ContextFactory instance
+   */
+  public static synchronized ContextFactory getFactory() throws IOException {
+    if (theFactory == null) {
+      theFactory = new ContextFactory();
+      theFactory.setAttributes();
+    }
+    return theFactory;
+  }
+    
+  private void setAttributes() throws IOException {
+    InputStream is = getClass().getResourceAsStream(PROPERTIES_FILE);
+    if (is != null) {
+      try {
+        Properties properties = new Properties();
+        properties.load(is);
+        // Copy every property into the factory's attribute map.
+        for (Object propertyNameObj : properties.keySet()) {
+          String propertyName = (String) propertyNameObj;
+          String propertyValue = properties.getProperty(propertyName);
+          setAttribute(propertyName, propertyValue);
+        }
+      } finally {
+        is.close();
+      }
+    }
+  }
+    
+}
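As a usage sketch of the class above, grounded in its public methods (the context name "dfs" is just an example):

    import org.apache.hadoop.metrics.ContextFactory;
    import org.apache.hadoop.metrics.MetricsContext;

    public class ContextFactoryExample {
      public static void main(String[] args) throws Exception {
        // Loads /hadoop-metrics.properties from the classpath, if present.
        ContextFactory factory = ContextFactory.getFactory();
        // Without a "dfs.class" attribute this falls back to NullContext.
        MetricsContext context = factory.getContext("dfs");
        System.out.println("context: " + context.getClass().getName());
      }
    }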

Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsContext.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsContext.java?rev=1397432&r1=1397431&r2=1397432&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsContext.java (original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsContext.java Fri Oct 12 04:35:42 2012
@@ -1,122 +1,122 @@
-/*
- * MetricsContext.java
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.metrics;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.Map;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.metrics.spi.OutputRecord;
-
-/**
- * The main interface to the metrics package. 
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public interface MetricsContext {
-    
-  /**
-   * Default period in seconds at which data is sent to the metrics system.
-   */
-  public static final int DEFAULT_PERIOD = 5;
-
-  /**
-   * Initialize this context.
-   * @param contextName The given name for this context
-   * @param factory The creator of this context
-   */
-  public void init(String contextName, ContextFactory factory);
-
-  /**
-   * Returns the context name.
-   *
-   * @return the context name
-   */
-  public abstract String getContextName();
-    
-  /**
-   * Starts or restarts monitoring, the emitting of metrics records as they are 
-   * updated. 
-   */
-  public abstract void startMonitoring()
-    throws IOException;
-
-  /**
-   * Stops monitoring.  This does not free any data that the implementation
-   * may have buffered for sending at the next timer event. It
-   * is OK to call <code>startMonitoring()</code> again after calling 
-   * this.
-   * @see #close()
-   */
-  public abstract void stopMonitoring();
-    
-  /**
-   * Returns true if monitoring is currently in progress.
-   */
-  public abstract boolean isMonitoring();
-    
-  /**
-   * Stops monitoring and also frees any buffered data, returning this 
-   * object to its initial state.  
-   */
-  public abstract void close();
-    
-  /**
-   * Creates a new MetricsRecord instance with the given <code>recordName</code>.
-   * Throws an exception if the metrics implementation is configured with a fixed
-   * set of record names and <code>recordName</code> is not in that set.
-   *
-   * @param recordName the name of the record
-   * @throws MetricsException if recordName conflicts with configuration data
-   */
-  public abstract MetricsRecord createRecord(String recordName);
-    
-  /**
-   * Registers a callback to be called at regular time intervals, as 
-   * determined by the implementation-class specific configuration.
-   *
-   * @param updater object to be run periodically; it should updated
-   * some metrics records and then return
-   */
-  public abstract void registerUpdater(Updater updater);
-
-  /**
-   * Removes a callback, if it exists.
-   * 
-   * @param updater object to be removed from the callback list
-   */
-  public abstract void unregisterUpdater(Updater updater);
-  
-  /**
-   * Returns the timer period.
-   */
-  public abstract int getPeriod();
-  
-  /**
-   * Retrieves all the records managed by this MetricsContext.
-   * Useful for monitoring systems that are polling-based.
-   * 
-   * @return A non-null map from all record names to the records managed.
-   */
-   Map<String, Collection<OutputRecord>> getAllRecords();
-}
+/*
+ * MetricsContext.java
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Map;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.metrics.spi.OutputRecord;
+
+/**
+ * The main interface to the metrics package. 
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface MetricsContext {
+    
+  /**
+   * Default period in seconds at which data is sent to the metrics system.
+   */
+  public static final int DEFAULT_PERIOD = 5;
+
+  /**
+   * Initialize this context.
+   * @param contextName The given name for this context
+   * @param factory The creator of this context
+   */
+  public void init(String contextName, ContextFactory factory);
+
+  /**
+   * Returns the context name.
+   *
+   * @return the context name
+   */
+  public abstract String getContextName();
+    
+  /**
+   * Starts or restarts monitoring, that is, the emitting of metrics
+   * records as they are updated.
+   */
+  public abstract void startMonitoring()
+    throws IOException;
+
+  /**
+   * Stops monitoring.  This does not free any data that the implementation
+   * may have buffered for sending at the next timer event. It
+   * is OK to call <code>startMonitoring()</code> again after calling 
+   * this.
+   * @see #close()
+   */
+  public abstract void stopMonitoring();
+    
+  /**
+   * Returns true if monitoring is currently in progress.
+   */
+  public abstract boolean isMonitoring();
+    
+  /**
+   * Stops monitoring and also frees any buffered data, returning this 
+   * object to its initial state.  
+   */
+  public abstract void close();
+    
+  /**
+   * Creates a new MetricsRecord instance with the given <code>recordName</code>.
+   * Throws an exception if the metrics implementation is configured with a fixed
+   * set of record names and <code>recordName</code> is not in that set.
+   *
+   * @param recordName the name of the record
+   * @throws MetricsException if recordName conflicts with configuration data
+   */
+  public abstract MetricsRecord createRecord(String recordName);
+    
+  /**
+   * Registers a callback to be called at regular time intervals, as 
+   * determined by the implementation-class specific configuration.
+   *
+   * @param updater object to be run periodically; it should update
+   * some metrics records and then return
+   */
+  public abstract void registerUpdater(Updater updater);
+
+  /**
+   * Removes a callback, if it exists.
+   * 
+   * @param updater object to be removed from the callback list
+   */
+  public abstract void unregisterUpdater(Updater updater);
+  
+  /**
+   * Returns the timer period.
+   */
+  public abstract int getPeriod();
+  
+  /**
+   * Retrieves all the records managed by this MetricsContext.
+   * Useful for monitoring systems that are polling-based.
+   * 
+   * @return A non-null map from all record names to the records managed.
+   */
+   Map<String, Collection<OutputRecord>> getAllRecords();
+}
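
A short usage sketch of the interface above, combining it with the ContextFactory from earlier in this change; the record and metric names are illustrative, and Updater/MetricsRecord are the companion types in the same org.apache.hadoop.metrics package:

    import org.apache.hadoop.metrics.ContextFactory;
    import org.apache.hadoop.metrics.MetricsContext;
    import org.apache.hadoop.metrics.MetricsRecord;
    import org.apache.hadoop.metrics.Updater;

    public class MetricsContextExample {
      public static void main(String[] args) throws Exception {
        MetricsContext ctx = ContextFactory.getFactory().getContext("example");
        final MetricsRecord rec = ctx.createRecord("requests"); // illustrative name
        ctx.registerUpdater(new Updater() {
          @Override
          public void doUpdates(MetricsContext context) {
            rec.setMetric("requests_total", 1); // illustrative metric
            rec.update(); // queue the record for the next timer-driven emission
          }
        });
        ctx.startMonitoring(); // emit every getPeriod() seconds until stopped
      }
    }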