Posted to common-commits@hadoop.apache.org by ae...@apache.org on 2015/07/21 20:25:04 UTC

[01/19] hadoop git commit: Pulling in YARN-3535 to branch 2.7.x

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 857686119 -> 942e1ac21


Pulling in YARN-3535 to branch 2.7.x


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/176131f1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/176131f1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/176131f1

Branch: refs/heads/HDFS-7240
Commit: 176131f12bc0d467e9caaa6a94b4ba96e09a4539
Parents: 419c51d
Author: Arun Suresh <as...@apache.org>
Authored: Sat Jul 18 10:05:54 2015 -0700
Committer: Arun Suresh <as...@apache.org>
Committed: Sat Jul 18 10:05:54 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/176131f1/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 8f7a365..e6a3343 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -646,9 +646,6 @@ Release 2.8.0 - UNRELEASED
     YARN-3885. ProportionalCapacityPreemptionPolicy doesn't preempt if queue is 
     more than 2 level. (Ajith S via wangda)
 
-    YARN-3535. Scheduler must re-request container resources when RMContainer transitions
-    from ALLOCATED to KILLED (rohithsharma and peng.zhang via asuresh)
-
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -675,6 +672,9 @@ Release 2.7.2 - UNRELEASED
     YARN-3905. Application History Server UI NPEs when accessing apps run after
     RM restart (Eric Payne via jeagles)
 
+    YARN-3535. Scheduler must re-request container resources when RMContainer transitions
+    from ALLOCATED to KILLED (rohithsharma and peng.zhang via asuresh)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES


[14/19] hadoop git commit: HDFS-7582. Enforce maximum number of ACL entries separately per access and default. (Contributed by Vinayakumar B)

Posted by ae...@apache.org.
HDFS-7582. Enforce maximum number of ACL entries separately per access and default. (Contributed by Vinayakumar B)
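
For context: this patch replaces the single whole-ACL entry cap with one cap per scope, so an ACL may now carry up to 32 access entries and, independently, up to 32 default entries (the 32-entry limit is inferred from the 33-entry "too large" fixtures in the test diff). A minimal standalone sketch of the new semantics, with plain String lists standing in for the real AclEntry/ScopedAclEntries types:

    import java.util.ArrayList;
    import java.util.List;

    public class PerScopeAclCheckDemo {
      static final int MAX_ENTRIES = 32; // mirrors AclTransformation.MAX_ENTRIES

      // Like the new checkMaxEntries: each scope is validated on its own.
      static void checkMaxEntries(List<String> accessEntries,
                                  List<String> defaultEntries) {
        if (accessEntries.size() > MAX_ENTRIES) {
          throw new IllegalArgumentException(
              "too many access entries: " + accessEntries.size());
        }
        if (defaultEntries.size() > MAX_ENTRIES) {
          throw new IllegalArgumentException(
              "too many default entries: " + defaultEntries.size());
        }
      }

      public static void main(String[] args) {
        List<String> access = new ArrayList<String>();
        List<String> defaults = new ArrayList<String>();
        for (int i = 0; i < 32; i++) {
          access.add("user" + i);
          defaults.add("user" + i);
        }
        // 64 entries overall, but each scope stays within its own cap, so
        // this passes after HDFS-7582; the old whole-ACL check rejected it.
        checkMaxEntries(access, defaults);
      }
    }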


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/29cf887b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/29cf887b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/29cf887b

Branch: refs/heads/HDFS-7240
Commit: 29cf887b226f4ab3c336a6e681db5e8e70699d66
Parents: 87f29c6
Author: Vinayakumar B <vi...@apache.org>
Authored: Tue Jul 21 15:16:52 2015 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Tue Jul 21 15:16:52 2015 +0530

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../hdfs/server/namenode/AclTransformation.java | 30 +++++++----
 .../server/namenode/TestAclTransformation.java  | 55 ++++++++++++++++++--
 3 files changed, 76 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/29cf887b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f38a870..6c91c45 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1057,6 +1057,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8344. NameNode doesn't recover lease for files with missing blocks
     (raviprak)
 
+    HDFS-7582. Enforce maximum number of ACL entries separately per access
+    and default. (vinayakumarb)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/29cf887b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java
index 1474e03..c887e9d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java
@@ -271,10 +271,6 @@ final class AclTransformation {
    */
   private static List<AclEntry> buildAndValidateAcl(
       ArrayList<AclEntry> aclBuilder) throws AclException {
-    if (aclBuilder.size() > MAX_ENTRIES) {
-      throw new AclException("Invalid ACL: ACL has " + aclBuilder.size() +
-        " entries, which exceeds maximum of " + MAX_ENTRIES + ".");
-    }
     aclBuilder.trimToSize();
     Collections.sort(aclBuilder, ACL_ENTRY_COMPARATOR);
     // Full iteration to check for duplicates and invalid named entries.
@@ -292,9 +288,12 @@ final class AclTransformation {
       }
       prevEntry = entry;
     }
+
+    ScopedAclEntries scopedEntries = new ScopedAclEntries(aclBuilder);
+    checkMaxEntries(scopedEntries);
+
     // Search for the required base access entries.  If there is a default ACL,
     // then do the same check on the default entries.
-    ScopedAclEntries scopedEntries = new ScopedAclEntries(aclBuilder);
     for (AclEntryType type: EnumSet.of(USER, GROUP, OTHER)) {
       AclEntry accessEntryKey = new AclEntry.Builder().setScope(ACCESS)
         .setType(type).build();
@@ -316,6 +315,22 @@ final class AclTransformation {
     return Collections.unmodifiableList(aclBuilder);
   }
 
+  // Check the max entries separately on access and default entries
+  // HDFS-7582
+  private static void checkMaxEntries(ScopedAclEntries scopedEntries)
+      throws AclException {
+    List<AclEntry> accessEntries = scopedEntries.getAccessEntries();
+    List<AclEntry> defaultEntries = scopedEntries.getDefaultEntries();
+    if (accessEntries.size() > MAX_ENTRIES) {
+      throw new AclException("Invalid ACL: ACL has " + accessEntries.size()
+          + " access entries, which exceeds maximum of " + MAX_ENTRIES + ".");
+    }
+    if (defaultEntries.size() > MAX_ENTRIES) {
+      throw new AclException("Invalid ACL: ACL has " + defaultEntries.size()
+          + " default entries, which exceeds maximum of " + MAX_ENTRIES + ".");
+    }
+  }
+
   /**
    * Calculates mask entries required for the ACL.  Mask calculation is performed
    * separately for each scope: access and default.  This method is responsible
@@ -444,11 +459,8 @@ final class AclTransformation {
      * @throws AclException if validation fails
      */
     public ValidatedAclSpec(List<AclEntry> aclSpec) throws AclException {
-      if (aclSpec.size() > MAX_ENTRIES) {
-        throw new AclException("Invalid ACL: ACL spec has " + aclSpec.size() +
-          " entries, which exceeds maximum of " + MAX_ENTRIES + ".");
-      }
       Collections.sort(aclSpec, ACL_ENTRY_COMPARATOR);
+      checkMaxEntries(new ScopedAclEntries(aclSpec));
       this.aclSpec = aclSpec;
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/29cf887b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java
index 23a2677..f66bf2a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java
@@ -31,11 +31,8 @@ import com.google.common.collect.Lists;
 import org.junit.Test;
 
 import org.apache.hadoop.fs.permission.AclEntry;
-import org.apache.hadoop.fs.permission.AclEntryScope;
-import org.apache.hadoop.fs.permission.AclEntryType;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.protocol.AclException;
-import org.apache.hadoop.hdfs.server.namenode.AclTransformation;
 
 /**
  * Tests operations that modify ACLs.  All tests in this suite have been
@@ -45,10 +42,13 @@ import org.apache.hadoop.hdfs.server.namenode.AclTransformation;
 public class TestAclTransformation {
 
   private static final List<AclEntry> ACL_SPEC_TOO_LARGE;
+  private static final List<AclEntry> ACL_SPEC_DEFAULT_TOO_LARGE;
   static {
     ACL_SPEC_TOO_LARGE = Lists.newArrayListWithCapacity(33);
+    ACL_SPEC_DEFAULT_TOO_LARGE = Lists.newArrayListWithCapacity(33);
     for (int i = 0; i < 33; ++i) {
       ACL_SPEC_TOO_LARGE.add(aclEntry(ACCESS, USER, "user" + i, ALL));
+      ACL_SPEC_DEFAULT_TOO_LARGE.add(aclEntry(DEFAULT, USER, "user" + i, ALL));
     }
   }
 
@@ -351,6 +351,17 @@ public class TestAclTransformation {
     filterAclEntriesByAclSpec(existing, ACL_SPEC_TOO_LARGE);
   }
 
+  @Test(expected = AclException.class)
+  public void testFilterDefaultAclEntriesByAclSpecInputTooLarge()
+      throws AclException {
+    List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
+        .add(aclEntry(DEFAULT, USER, ALL))
+        .add(aclEntry(DEFAULT, GROUP, READ))
+        .add(aclEntry(DEFAULT, OTHER, NONE))
+        .build();
+    filterAclEntriesByAclSpec(existing, ACL_SPEC_DEFAULT_TOO_LARGE);
+  }
+
   @Test
   public void testFilterDefaultAclEntries() throws AclException {
     List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
@@ -720,6 +731,16 @@ public class TestAclTransformation {
   }
 
   @Test(expected=AclException.class)
+  public void testMergeAclDefaultEntriesInputTooLarge() throws AclException {
+    List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
+      .add(aclEntry(DEFAULT, USER, ALL))
+      .add(aclEntry(DEFAULT, GROUP, READ))
+      .add(aclEntry(DEFAULT, OTHER, NONE))
+      .build();
+    mergeAclEntries(existing, ACL_SPEC_DEFAULT_TOO_LARGE);
+  }
+
+  @Test(expected=AclException.class)
   public void testMergeAclEntriesResultTooLarge() throws AclException {
     ImmutableList.Builder<AclEntry> aclBuilder =
       new ImmutableList.Builder<AclEntry>()
@@ -737,6 +758,24 @@ public class TestAclTransformation {
     mergeAclEntries(existing, aclSpec);
   }
 
+  @Test(expected = AclException.class)
+  public void testMergeAclDefaultEntriesResultTooLarge() throws AclException {
+    ImmutableList.Builder<AclEntry> aclBuilder =
+        new ImmutableList.Builder<AclEntry>()
+        .add(aclEntry(DEFAULT, USER, ALL));
+    for (int i = 1; i <= 28; ++i) {
+      aclBuilder.add(aclEntry(DEFAULT, USER, "user" + i, READ));
+    }
+    aclBuilder
+    .add(aclEntry(DEFAULT, GROUP, READ))
+    .add(aclEntry(DEFAULT, MASK, READ))
+    .add(aclEntry(DEFAULT, OTHER, NONE));
+    List<AclEntry> existing = aclBuilder.build();
+    List<AclEntry> aclSpec = Lists.newArrayList(
+         aclEntry(DEFAULT, USER, "bruce", READ));
+    mergeAclEntries(existing, aclSpec);
+  }
+
   @Test(expected=AclException.class)
   public void testMergeAclEntriesDuplicateEntries() throws AclException {
     List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
@@ -1092,6 +1131,16 @@ public class TestAclTransformation {
   }
 
   @Test(expected=AclException.class)
+  public void testReplaceAclDefaultEntriesInputTooLarge() throws AclException {
+    List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
+      .add(aclEntry(DEFAULT, USER, ALL))
+      .add(aclEntry(DEFAULT, GROUP, READ))
+      .add(aclEntry(DEFAULT, OTHER, NONE))
+      .build();
+    replaceAclEntries(existing, ACL_SPEC_DEFAULT_TOO_LARGE);
+  }
+
+  @Test(expected=AclException.class)
   public void testReplaceAclEntriesResultTooLarge() throws AclException {
     List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
       .add(aclEntry(ACCESS, USER, ALL))


[12/19] hadoop git commit: HDFS-7483. Display information per tier on the Namenode UI. Contributed by Benoy Antony and Haohui Mai.

Posted by ae...@apache.org.
HDFS-7483. Display information per tier on the Namenode UI. Contributed by Benoy Antony and Haohui Mai.
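
For context: the new table in dfshealth.html is fed by a "blockstats" JMX query (Hadoop:service=NameNode,name=BlockStats), and dfshealth.js derives the used/remaining percentages per storage type on the client. A small sketch of the same arithmetic; the sample capacities are made up, and the zero-capacity guard is an addition here (the JS in the patch divides unconditionally):

    public class StorageTypePercentages {
      static double percentage(long part, long capacityTotal) {
        // Guard against an empty tier; an assumption, not in the patch.
        return capacityTotal == 0 ? 0.0 : part * 100.0 / capacityTotal;
      }

      public static void main(String[] args) {
        long capacityTotal = 4L << 40;     // 4 TB tier (made-up figure)
        long capacityUsed = 1L << 40;      // 1 TB used
        long capacityRemaining = capacityTotal - capacityUsed;
        System.out.printf("used: %.1f%%, remaining: %.1f%%%n",
            percentage(capacityUsed, capacityTotal),
            percentage(capacityRemaining, capacityTotal));
        // Prints: used: 25.0%, remaining: 75.0%
      }
    }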


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df1e8ce4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df1e8ce4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df1e8ce4

Branch: refs/heads/HDFS-7240
Commit: df1e8ce44a4716b2cb4ff3d161d7df8081572290
Parents: a628f67
Author: Haohui Mai <wh...@apache.org>
Authored: Mon Jul 20 20:10:53 2015 -0700
Committer: Haohui Mai <wh...@apache.org>
Committed: Mon Jul 20 20:13:12 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +++
 .../src/main/webapps/hdfs/dfshealth.html        | 25 ++++++++++++++++++++
 .../src/main/webapps/hdfs/dfshealth.js          |  8 +++++++
 .../blockmanagement/TestBlockStatsMXBean.java   |  1 -
 4 files changed, 36 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df1e8ce4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1293388..f38a870 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -729,6 +729,9 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-8794. Improve CorruptReplicasMap#corruptReplicasMap. (yliu)
 
+    HDFS-7483. Display information per tier on the Namenode UI.
+    (Benoy Antony and wheat9 via wheat9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df1e8ce4/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 5a3a309..8cdff84 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -199,6 +199,31 @@
   {#failed}{#helper_dir_status type="Failed"/}{/failed}
   {/nn.NameDirStatuses}
 </table>
+<div class="page-header"><h1>DFS Storage Types</h1></div>
+<small>
+<table class="table">
+  <thead>
+    <tr>
+      <th>Storage Type</th>
+      <th>Configured Capacity</th>
+      <th>Capacity Used</th>
+      <th>Capacity Remaining</th>
+      <th>Block Pool Used</th>
+      <th>Nodes In Service</th>
+    </tr>
+  </thead>
+  {#blockstats.StorageTypeStats}
+  <tr>
+    <td>{key}</td>
+    <td>{value.capacityTotal|fmt_bytes}</td>
+    <td>{value.capacityUsed|fmt_bytes} ({value.capacityUsedPercentage|fmt_percentage})</td>
+    <td>{value.capacityRemaining|fmt_bytes} ({value.capacityRemainingPercentage|fmt_percentage})</td>
+    <td>{value.blockPoolUsed|fmt_bytes}</td>
+    <td>{value.nodesInService}</td>
+  </tr>
+  {/blockstats.StorageTypeStats}
+</table>
+</small>
 </script>
 
 <script type="text/x-dust-template" id="tmpl-snapshot">

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df1e8ce4/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
index a045e42..1c13493 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
@@ -29,6 +29,7 @@
       {"name": "nn",      "url": "/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo"},
       {"name": "nnstat",  "url": "/jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus"},
       {"name": "fs",      "url": "/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState"},
+      {"name": "blockstats",      "url": "/jmx?qry=Hadoop:service=NameNode,name=BlockStats"},
       {"name": "mem",     "url": "/jmx?qry=java.lang:type=Memory"}
     ];
 
@@ -88,6 +89,13 @@
         for (var k in d) {
           data[k] = k === 'nn' ? workaround(d[k].beans[0]) : d[k].beans[0];
         }
+
+        var blockstats = data['blockstats'];
+        for (var k in blockstats.StorageTypeStats) {
+          var b = blockstats.StorageTypeStats[k].value;
+          b.capacityUsedPercentage = b.capacityUsed * 100.0 / b.capacityTotal;
+          b.capacityRemainingPercentage = b.capacityRemaining * 100.0 / b.capacityTotal;
+        }
         render();
       }),
       function (url, jqxhr, text, err) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df1e8ce4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
index 43d983d..2fe8768 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
@@ -106,7 +106,6 @@ public class TestBlockStatsMXBean {
   public void testStorageTypeStatsJMX() throws Exception {
     URL baseUrl = new URL (cluster.getHttpUri(0));
     String result = readOutput(new URL(baseUrl, "/jmx"));
-    System.out.println(result);
 
     Map<String, Object> stat = (Map<String, Object>) JSON.parse(result);
     Object[] beans =(Object[]) stat.get("beans");


[05/19] hadoop git commit: HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString() over getMessage() in logging/span events. (Varun Saxena via stevel)

Posted by ae...@apache.org.
HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString() over getMessage() in logging/span events. (Varun Saxena via stevel)
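
The motivation is that Throwable.getMessage() can be null and never includes the exception class, while toString() always carries the class name. A quick illustration:

    import java.io.IOException;

    public class ExceptionStringDemo {
      public static void main(String[] args) {
        Exception noMessage = new NullPointerException();
        System.out.println("getMessage(): " + noMessage.getMessage()); // null
        System.out.println("toString():   " + noMessage);
        // -> java.lang.NullPointerException

        Exception withMessage = new IOException("connection reset");
        System.out.println("getMessage(): " + withMessage.getMessage());
        System.out.println("toString():   " + withMessage);
        // -> java.io.IOException: connection reset
      }
    }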


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a9431425
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a9431425
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a9431425

Branch: refs/heads/HDFS-7240
Commit: a9431425d1aff657fc1ea501c706235f2ebc518f
Parents: 05fa336
Author: Steve Loughran <st...@apache.org>
Authored: Mon Jul 20 13:13:09 2015 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Mon Jul 20 13:13:23 2015 +0100

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                   | 3 +++
 .../src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java    | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9431425/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index bfa9aac..1b643a9 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -978,6 +978,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12088. KMSClientProvider uses equalsIgnoreCase("application/json").
     (Brahma Reddy Battula via stevel)
 
+    HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()
+    over getMessage() in logging/span events. (Varun Saxena via stevel)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9431425/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index e75de15..cc75f5c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -238,7 +238,7 @@ public class ProtobufRpcEngine implements RpcEngine {
         }
         if (Trace.isTracing()) {
           traceScope.getSpan().addTimelineAnnotation(
-              "Call got exception: " + e.getMessage());
+              "Call got exception: " + e.toString());
         }
         throw new ServiceException(e);
       } finally {


[08/19] hadoop git commit: HDFS-8657. Update docs for mSNN. Contributed by Jesse Yates.

Posted by ae...@apache.org.
HDFS-8657. Update docs for mSNN. Contributed by Jesse Yates.
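
The updated docs describe configuring more than two NameNodes per nameservice. A sketch of how the documented keys fit together on the client side, assuming hadoop-common is on the classpath; "mycluster" and nn1..nn3 are the docs' example values, not real cluster settings:

    import org.apache.hadoop.conf.Configuration;

    public class HaNameNodeList {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("dfs.nameservices", "mycluster");
        conf.set("dfs.ha.namenodes.mycluster", "nn1,nn2,nn3");
        conf.set("dfs.namenode.rpc-address.mycluster.nn1",
            "machine1.example.com:8020");
        conf.set("dfs.namenode.rpc-address.mycluster.nn2",
            "machine2.example.com:8020");
        conf.set("dfs.namenode.rpc-address.mycluster.nn3",
            "machine3.example.com:8020");

        // DataNodes and clients resolve every NameNode in the nameservice
        // from this list, not just a fixed pair.
        for (String nnId : conf.getStrings("dfs.ha.namenodes.mycluster")) {
          System.out.println(nnId + " -> "
              + conf.get("dfs.namenode.rpc-address.mycluster." + nnId));
        }
      }
    }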


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed01dc70
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed01dc70
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed01dc70

Branch: refs/heads/HDFS-7240
Commit: ed01dc70b2f4ff4bdcaf71c19acf244da0868a82
Parents: e4f7562
Author: Aaron T. Myers <at...@apache.org>
Authored: Mon Jul 20 16:40:06 2015 -0700
Committer: Aaron T. Myers <at...@apache.org>
Committed: Mon Jul 20 16:40:06 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  2 +
 .../markdown/HDFSHighAvailabilityWithNFS.md     | 40 +++++++++++---------
 .../markdown/HDFSHighAvailabilityWithQJM.md     | 32 ++++++++++------
 3 files changed, 45 insertions(+), 29 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed01dc70/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 13d9969..cd32c0e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -341,6 +341,8 @@ Trunk (Unreleased)
     HDFS-8627. NPE thrown if unable to fetch token from Namenode
     (J.Andreina via vinayakumarb)
 
+    HDFS-8657. Update docs for mSNN. (Jesse Yates via atm)
+
 Release 2.8.0 - UNRELEASED
 
   NEW FEATURES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed01dc70/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
index 626a473..cc53a38 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithNFS.md
@@ -65,18 +65,18 @@ This impacted the total availability of the HDFS cluster in two major ways:
 * Planned maintenance events such as software or hardware upgrades on the
   NameNode machine would result in windows of cluster downtime.
 
-The HDFS High Availability feature addresses the above problems by providing the option of running two redundant NameNodes in the same cluster in an Active/Passive configuration with a hot standby. This allows a fast failover to a new NameNode in the case that a machine crashes, or a graceful administrator-initiated failover for the purpose of planned maintenance.
+The HDFS High Availability feature addresses the above problems by providing the option of running two (or more, as of Hadoop 3.0.0) redundant NameNodes in the same cluster in an Active/Passive configuration with one or more hot standbys. This allows a fast failover to a new NameNode in the case that a machine crashes, or a graceful administrator-initiated failover for the purpose of planned maintenance.
 
 Architecture
 ------------
 
-In a typical HA cluster, two separate machines are configured as NameNodes. At any point in time, exactly one of the NameNodes is in an *Active* state, and the other is in a *Standby* state. The Active NameNode is responsible for all client operations in the cluster, while the Standby is simply acting as a slave, maintaining enough state to provide a fast failover if necessary.
+In a typical HA cluster, two or more separate machines are configured as NameNodes. At any point in time, exactly one of the NameNodes is in an *Active* state, and the others are in a *Standby* state. The Active NameNode is responsible for all client operations in the cluster, while the Standbys are simply acting as slaves, maintaining enough state to provide a fast failover if necessary.
 
-In order for the Standby node to keep its state synchronized with the Active node, the current implementation requires that the two nodes both have access to a directory on a shared storage device (eg an NFS mount from a NAS). This restriction will likely be relaxed in future versions.
+In order for the Standby nodes to keep their state synchronized with the Active node, the current implementation requires that the nodes have access to a directory on a shared storage device (eg an NFS mount from a NAS). This restriction will likely be relaxed in future versions.
 
-When any namespace modification is performed by the Active node, it durably logs a record of the modification to an edit log file stored in the shared directory. The Standby node is constantly watching this directory for edits, and as it sees the edits, it applies them to its own namespace. In the event of a failover, the Standby will ensure that it has read all of the edits from the shared storage before promoting itself to the Active state. This ensures that the namespace state is fully synchronized before a failover occurs.
+When any namespace modification is performed by the Active node, it durably logs a record of the modification to an edit log file stored in the shared directory. The Standby nodes are constantly watching this directory for edits, and as they see the edits, they apply them to their own namespace. In the event of a failover, the Standby will ensure that it has read all of the edits from the shared storage before promoting itself to the Active state. This ensures that the namespace state is fully synchronized before a failover occurs.
 
-In order to provide a fast failover, it is also necessary that the Standby node have up-to-date information regarding the location of blocks in the cluster. In order to achieve this, the DataNodes are configured with the location of both NameNodes, and send block location information and heartbeats to both.
+In order to provide a fast failover, it is also necessary that the Standby nodes have up-to-date information regarding the location of blocks in the cluster. In order to achieve this, the DataNodes are configured with the location of all NameNodes, and send block location information and heartbeats to all the NameNodes.
 
 It is vital for the correct operation of an HA cluster that only one of the NameNodes be Active at a time. Otherwise, the namespace state would quickly diverge between the two, risking data loss or other incorrect results. In order to ensure this property and prevent the so-called "split-brain scenario," the administrator must configure at least one *fencing method* for the shared storage. During a failover, if it cannot be verified that the previous Active node has relinquished its Active state, the fencing process is responsible for cutting off the previous Active's access to the shared edits storage. This prevents it from making any further edits to the namespace, allowing the new Active to safely proceed with failover.
 
@@ -87,9 +87,9 @@ In order to deploy an HA cluster, you should prepare the following:
 
 * **NameNode machines** - the machines on which you run the Active and Standby NameNodes should have equivalent hardware to each other, and equivalent hardware to what would be used in a non-HA cluster.
 
-* **Shared storage** - you will need to have a shared directory which both NameNode machines can have read/write access to. Typically this is a remote filer which supports NFS and is mounted on each of the NameNode machines. Currently only a single shared edits directory is supported. Thus, the availability of the system is limited by the availability of this shared edits directory, and therefore in order to remove all single points of failure there needs to be redundancy for the shared edits directory. Specifically, multiple network paths to the storage, and redundancy in the storage itself (disk, network, and power). Beacuse of this, it is recommended that the shared storage server be a high-quality dedicated NAS appliance rather than a simple Linux server.
+* **Shared storage** - you will need to have a shared directory which the NameNode machines have read/write access to. Typically this is a remote filer which supports NFS and is mounted on each of the NameNode machines. Currently only a single shared edits directory is supported. Thus, the availability of the system is limited by the availability of this shared edits directory, and therefore in order to remove all single points of failure there needs to be redundancy for the shared edits directory. Specifically, multiple network paths to the storage, and redundancy in the storage itself (disk, network, and power). Because of this, it is recommended that the shared storage server be a high-quality dedicated NAS appliance rather than a simple Linux server.
 
-Note that, in an HA cluster, the Standby NameNode also performs checkpoints of the namespace state, and thus it is not necessary to run a Secondary NameNode, CheckpointNode, or BackupNode in an HA cluster. In fact, to do so would be an error. This also allows one who is reconfiguring a non-HA-enabled HDFS cluster to be HA-enabled to reuse the hardware which they had previously dedicated to the Secondary NameNode.
+Note that, in an HA cluster, the Standby NameNodes also perform checkpoints of the namespace state, and thus it is not necessary to run a Secondary NameNode, CheckpointNode, or BackupNode in an HA cluster. In fact, to do so would be an error. This also allows one who is reconfiguring a non-HA-enabled HDFS cluster to be HA-enabled to reuse the hardware which they had previously dedicated to the Secondary NameNode.
 
 Deployment
 ----------
@@ -124,17 +124,15 @@ The order in which you set these configurations is unimportant, but the values y
 
     Configure with a list of comma-separated NameNode IDs. This will be used by
     DataNodes to determine all the NameNodes in the cluster. For example, if you
-    used "mycluster" as the nameservice ID previously, and you wanted to use "nn1"
-    and "nn2" as the individual IDs of the NameNodes, you would configure this as
+    used "mycluster" as the nameservice ID previously, and you wanted to use "nn1","nn2" and "nn3" as the individual IDs of the NameNodes, you would configure this as
     such:
 
         <property>
           <name>dfs.ha.namenodes.mycluster</name>
-          <value>nn1,nn2</value>
+          <value>nn1,nn2,nn3</value>
         </property>
 
-    **Note:** Currently, only a maximum of two NameNodes may be configured per
-    nameservice.
+    **Note:** The minimum number of NameNodes for HA is two, but you can configure more. It is suggested not to exceed five NameNodes (three are recommended) due to communication overheads.
 
 *   **dfs.namenode.rpc-address.[nameservice ID].[name node ID]** - the fully-qualified RPC address for each NameNode to listen on
 
@@ -150,6 +148,10 @@ The order in which you set these configurations is unimportant, but the values y
           <name>dfs.namenode.rpc-address.mycluster.nn2</name>
           <value>machine2.example.com:8020</value>
         </property>
+        <property>
+          <name>dfs.namenode.rpc-address.mycluster.nn3</name>
+          <value>machine3.example.com:8020</value>
+        </property>
 
     **Note:** You may similarly configure the "**servicerpc-address**" setting if
     you so desire.
@@ -167,6 +169,10 @@ The order in which you set these configurations is unimportant, but the values y
           <name>dfs.namenode.http-address.mycluster.nn2</name>
           <value>machine2.example.com:50070</value>
         </property>
+        <property>
+          <name>dfs.namenode.http-address.mycluster.nn3</name>
+          <value>machine3.example.com:50070</value>
+        </property>
 
     **Note:** If you have Hadoop's security features enabled, you should also set
     the *https-address* similarly for each NameNode.
@@ -174,9 +180,9 @@ The order in which you set these configurations is unimportant, but the values y
 *   **dfs.namenode.shared.edits.dir** - the location of the shared storage directory
 
     This is where one configures the path to the remote shared edits directory
-    which the Standby NameNode uses to stay up-to-date with all the file system
+    which the Standby NameNodes use to stay up-to-date with all the file system
     changes the Active NameNode makes. **You should only configure one of these
-    directories.** This directory should be mounted r/w on both NameNode machines.
+    directories.** This directory should be mounted r/w on the NameNode machines.
     The value of this setting should be the absolute path to this directory on the
     NameNode machines. For example:
 
@@ -203,7 +209,7 @@ The order in which you set these configurations is unimportant, but the values y
     It is critical for correctness of the system that only one NameNode be in the
     Active state at any given time. Thus, during a failover, we first ensure that
     the Active NameNode is either in the Standby state, or the process has
-    terminated, before transitioning the other NameNode to the Active state. In
+    terminated, before transitioning another NameNode to the Active state. In
     order to do this, you must configure at least one **fencing method.** These are
     configured as a carriage-return-separated list, which will be attempted in order
     until one indicates that fencing has succeeded. There are two methods which
@@ -320,7 +326,7 @@ After all of the necessary configuration options have been set, one must initial
 * If you have already formatted the NameNode, or are converting a
   non-HA-enabled cluster to be HA-enabled, you should now copy over the
   contents of your NameNode metadata directories to the other, unformatted
-  NameNode by running the command "*hdfs namenode -bootstrapStandby*" on the
+  NameNodes by running the command "*hdfs namenode -bootstrapStandby*" on the
   unformatted NameNode. Running this command will also ensure that the shared
   edits directory (as configured by **dfs.namenode.shared.edits.dir**) contains
   sufficient edits transactions to be able to start both NameNodes.
@@ -329,7 +335,7 @@ After all of the necessary configuration options have been set, one must initial
   command "*hdfs -initializeSharedEdits*", which will initialize the shared
   edits directory with the edits data from the local NameNode edits directories.
 
-At this point you may start both of your HA NameNodes as you normally would start a NameNode.
+At this point you may start all of your HA NameNodes as you normally would start a NameNode.
 
 You can visit each of the NameNodes' web pages separately by browsing to their configured HTTP addresses. You should notice that next to the configured address will be the HA state of the NameNode (either "standby" or "active".) Whenever an HA NameNode starts, it is initially in the Standby state.
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed01dc70/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
index 06e1bb1..d9d9a67 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
@@ -65,16 +65,16 @@ This impacted the total availability of the HDFS cluster in two major ways:
 * Planned maintenance events such as software or hardware upgrades on the
   NameNode machine would result in windows of cluster downtime.
 
-The HDFS High Availability feature addresses the above problems by providing the option of running two redundant NameNodes in the same cluster in an Active/Passive configuration with a hot standby. This allows a fast failover to a new NameNode in the case that a machine crashes, or a graceful administrator-initiated failover for the purpose of planned maintenance.
+The HDFS High Availability feature addresses the above problems by providing the option of running two (or, as of Hadoop 3.0.0, more than two) redundant NameNodes in the same cluster in an Active/Passive configuration with a hot standby. This allows a fast failover to a new NameNode in the case that a machine crashes, or a graceful administrator-initiated failover for the purpose of planned maintenance.
 
 Architecture
 ------------
 
-In a typical HA cluster, two separate machines are configured as NameNodes. At any point in time, exactly one of the NameNodes is in an *Active* state, and the other is in a *Standby* state. The Active NameNode is responsible for all client operations in the cluster, while the Standby is simply acting as a slave, maintaining enough state to provide a fast failover if necessary.
+In a typical HA cluster, two or more separate machines are configured as NameNodes. At any point in time, exactly one of the NameNodes is in an *Active* state, and the others are in a *Standby* state. The Active NameNode is responsible for all client operations in the cluster, while the Standbys are simply acting as slaves, maintaining enough state to provide a fast failover if necessary.
 
 In order for the Standby node to keep its state synchronized with the Active node, both nodes communicate with a group of separate daemons called "JournalNodes" (JNs). When any namespace modification is performed by the Active node, it durably logs a record of the modification to a majority of these JNs. The Standby node is capable of reading the edits from the JNs, and is constantly watching them for changes to the edit log. As the Standby Node sees the edits, it applies them to its own namespace. In the event of a failover, the Standby will ensure that it has read all of the edits from the JounalNodes before promoting itself to the Active state. This ensures that the namespace state is fully synchronized before a failover occurs.
 
-In order to provide a fast failover, it is also necessary that the Standby node have up-to-date information regarding the location of blocks in the cluster. In order to achieve this, the DataNodes are configured with the location of both NameNodes, and send block location information and heartbeats to both.
+In order to provide a fast failover, it is also necessary that the Standby node have up-to-date information regarding the location of blocks in the cluster. In order to achieve this, the DataNodes are configured with the location of all NameNodes, and send block location information and heartbeats to all of them.
 
 It is vital for the correct operation of an HA cluster that only one of the NameNodes be Active at a time. Otherwise, the namespace state would quickly diverge between the two, risking data loss or other incorrect results. In order to ensure this property and prevent the so-called "split-brain scenario," the JournalNodes will only ever allow a single NameNode to be a writer at a time. During a failover, the NameNode which is to become active will simply take over the role of writing to the JournalNodes, which will effectively prevent the other NameNode from continuing in the Active state, allowing the new Active to safely proceed with failover.
 
@@ -99,7 +99,7 @@ In order to deploy an HA cluster, you should prepare the following:
   running with N JournalNodes, the system can tolerate at most (N - 1) / 2
   failures and continue to function normally.
 
-Note that, in an HA cluster, the Standby NameNode also performs checkpoints of the namespace state, and thus it is not necessary to run a Secondary NameNode, CheckpointNode, or BackupNode in an HA cluster. In fact, to do so would be an error. This also allows one who is reconfiguring a non-HA-enabled HDFS cluster to be HA-enabled to reuse the hardware which they had previously dedicated to the Secondary NameNode.
+Note that, in an HA cluster, the Standby NameNodes also perform checkpoints of the namespace state, and thus it is not necessary to run a Secondary NameNode, CheckpointNode, or BackupNode in an HA cluster. In fact, to do so would be an error. This also allows one who is reconfiguring a non-HA-enabled HDFS cluster to be HA-enabled to reuse the hardware which they had previously dedicated to the Secondary NameNode.
 
 Deployment
 ----------
@@ -136,16 +136,16 @@ The order in which you set these configurations is unimportant, but the values y
 
     Configure with a list of comma-separated NameNode IDs. This will be used by
     DataNodes to determine all the NameNodes in the cluster. For example, if you
-    used "mycluster" as the nameservice ID previously, and you wanted to use "nn1"
-    and "nn2" as the individual IDs of the NameNodes, you would configure this as
+    used "mycluster" as the nameservice ID previously, and you wanted to use "nn1",
+    "nn2" and "nn3" as the individual IDs of the NameNodes, you would configure this as
     such:
 
         <property>
           <name>dfs.ha.namenodes.mycluster</name>
-          <value>nn1,nn2</value>
+          <value>nn1,nn2,nn3</value>
         </property>
 
-    **Note:** Currently, only a maximum of two NameNodes may be configured per nameservice.
+    **Note:** The minimum number of NameNodes for HA is two, but you can configure more. It is suggested not to exceed five NameNodes (three are recommended) due to communication overheads.
 
 *   **dfs.namenode.rpc-address.[nameservice ID].[name node ID]** - the fully-qualified RPC address for each NameNode to listen on
 
@@ -161,6 +161,10 @@ The order in which you set these configurations is unimportant, but the values y
           <name>dfs.namenode.rpc-address.mycluster.nn2</name>
           <value>machine2.example.com:8020</value>
         </property>
+        <property>
+          <name>dfs.namenode.rpc-address.mycluster.nn3</name>
+          <value>machine3.example.com:8020</value>
+        </property>
 
     **Note:** You may similarly configure the "**servicerpc-address**" setting if you so desire.
 
@@ -177,6 +181,10 @@ The order in which you set these configurations is unimportant, but the values y
           <name>dfs.namenode.http-address.mycluster.nn2</name>
           <value>machine2.example.com:50070</value>
         </property>
+        <property>
+          <name>dfs.namenode.http-address.mycluster.nn3</name>
+          <value>machine3.example.com:50070</value>
+        </property>
 
     **Note:** If you have Hadoop's security features enabled, you should also set
     the *https-address* similarly for each NameNode.
@@ -365,8 +373,8 @@ Once the JournalNodes have been started, one must initially synchronize the two
 * If you have already formatted the NameNode, or are converting a
   non-HA-enabled cluster to be HA-enabled, you should now copy over the
   contents of your NameNode metadata directories to the other, unformatted
-  NameNode by running the command "*hdfs namenode -bootstrapStandby*" on the
-  unformatted NameNode. Running this command will also ensure that the
+  NameNode(s) by running the command "*hdfs namenode -bootstrapStandby*" on the
+  unformatted NameNode(s). Running this command will also ensure that the
   JournalNodes (as configured by **dfs.namenode.shared.edits.dir**) contain
   sufficient edits transactions to be able to start both NameNodes.
 
@@ -374,7 +382,7 @@ Once the JournalNodes have been started, one must initially synchronize the two
   command "*hdfs namenode -initializeSharedEdits*", which will initialize the
   JournalNodes with the edits data from the local NameNode edits directories.
 
-At this point you may start both of your HA NameNodes as you normally would start a NameNode.
+At this point you may start all of your HA NameNodes as you normally would start a NameNode.
 
 You can visit each of the NameNodes' web pages separately by browsing to their configured HTTP addresses. You should notice that next to the configured address will be the HA state of the NameNode (either "standby" or "active".) Whenever an HA NameNode starts, it is initially in the Standby state.
 
@@ -443,7 +451,7 @@ Apache ZooKeeper is a highly available service for maintaining small amounts of
 
 * **Failure detection** - each of the NameNode machines in the cluster
   maintains a persistent session in ZooKeeper. If the machine crashes, the
-  ZooKeeper session will expire, notifying the other NameNode that a failover
+  ZooKeeper session will expire, notifying the other NameNode(s) that a failover
   should be triggered.
 
 * **Active NameNode election** - ZooKeeper provides a simple mechanism to


[17/19] hadoop git commit: YARN-2003. Support for Application priority : Changes in RM and Capacity Scheduler. (Sunil G via wangda)

Posted by ae...@apache.org.
YARN-2003. Support for Application priority : Changes in RM and Capacity Scheduler. (Sunil G via wangda)
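
The patch adds a cluster-wide ceiling, yarn.cluster.max-application-priority (default 0), and a scheduler hook checkAndGetApplicationPriority that RMAppManager calls at submission time. The hunks below only show dummy implementations returning priority 0; the sketch clamps the requested priority to the configured ceiling, which is an assumption about the CapacityScheduler override rather than code visible in this digest:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.api.records.Priority;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class PriorityClampDemo {
      // Assumed clamp: bound the submitted priority by the cluster maximum.
      static Priority clamp(Priority requested, Configuration conf) {
        int max = conf.getInt(
            YarnConfiguration.MAX_CLUSTER_LEVEL_APPLICATION_PRIORITY,
            YarnConfiguration.DEFAULT_CLUSTER_LEVEL_APPLICATION_PRIORITY);
        return Priority.newInstance(Math.min(requested.getPriority(), max));
      }

      public static void main(String[] args) {
        Configuration conf = new YarnConfiguration();
        conf.setInt(
            YarnConfiguration.MAX_CLUSTER_LEVEL_APPLICATION_PRIORITY, 10);
        // A request above the ceiling comes back clamped to 10.
        System.out.println(
            clamp(Priority.newInstance(15), conf).getPriority());
      }
    }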


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c39ca541
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c39ca541
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c39ca541

Branch: refs/heads/HDFS-7240
Commit: c39ca541f498712133890961598bbff50d89d68b
Parents: da2d1ac
Author: Wangda Tan <wa...@apache.org>
Authored: Tue Jul 21 09:56:59 2015 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Tue Jul 21 09:57:23 2015 -0700

----------------------------------------------------------------------
 .../sls/scheduler/ResourceSchedulerWrapper.java |  10 +
 hadoop-yarn-project/CHANGES.txt                 |   3 +
 .../hadoop/yarn/conf/YarnConfiguration.java     |   5 +
 .../server/resourcemanager/RMAppManager.java    |  20 +-
 .../server/resourcemanager/rmapp/RMAppImpl.java |  15 +-
 .../scheduler/AbstractYarnScheduler.java        |  10 +
 .../server/resourcemanager/scheduler/Queue.java |   8 +
 .../scheduler/SchedulerApplication.java         |  22 ++
 .../scheduler/SchedulerApplicationAttempt.java  |  15 +-
 .../scheduler/YarnScheduler.java                |  20 ++
 .../scheduler/capacity/AbstractCSQueue.java     |   7 +
 .../scheduler/capacity/CapacityScheduler.java   |  73 +++-
 .../CapacitySchedulerConfiguration.java         |  13 +
 .../scheduler/capacity/LeafQueue.java           |  19 +-
 .../scheduler/common/fica/FiCaSchedulerApp.java |   8 +
 .../scheduler/event/AppAddedSchedulerEvent.java |  28 +-
 .../resourcemanager/scheduler/fair/FSQueue.java |   6 +
 .../scheduler/fifo/FifoScheduler.java           |   6 +
 .../scheduler/policy/FifoComparator.java        |  11 +-
 .../scheduler/policy/SchedulableEntity.java     |   5 +
 .../yarn/server/resourcemanager/MockRM.java     |  31 +-
 .../server/resourcemanager/TestAppManager.java  |   1 +
 .../TestWorkPreservingRMRestart.java            |   2 +-
 ...pacityPreemptionPolicyForNodePartitions.java |   1 +
 .../capacity/TestApplicationLimits.java         |   5 +-
 .../capacity/TestApplicationPriority.java       | 345 +++++++++++++++++++
 .../capacity/TestCapacityScheduler.java         |   5 +
 .../scheduler/policy/MockSchedulableEntity.java |  13 +-
 .../security/TestDelegationTokenRenewer.java    |  10 +-
 .../TestRMWebServicesAppsModification.java      |   2 +-
 30 files changed, 664 insertions(+), 55 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39ca541/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java
index 08cb1e6..14e2645 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
@@ -949,4 +950,13 @@ final public class ResourceSchedulerWrapper
       ContainerStatus containerStatus, RMContainerEventType event) {
     // do nothing
   }
+
+  @Override
+  public Priority checkAndGetApplicationPriority(Priority priority,
+      String user, String queueName, ApplicationId applicationId)
+      throws YarnException {
+    // TODO Dummy implementation.
+    return Priority.newInstance(0);
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39ca541/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d0829c1..7259cf2 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -139,6 +139,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3116. RM notifies NM whether a container is an AM container or normal
     task container. (Giovanni Matteo Fumarola via zjshen)
 
+    YARN-2003. Support for Application priority : Changes in RM and Capacity 
+    Scheduler. (Sunil G via wangda)
+
   IMPROVEMENTS
 
     YARN-644. Basic null check is not performed on passed in arguments before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39ca541/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 6b660f7..060635f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1928,6 +1928,11 @@ public class YarnConfiguration extends Configuration {
   public static final String DEFAULT_NODELABEL_CONFIGURATION_TYPE =
       CENTALIZED_NODELABEL_CONFIGURATION_TYPE;
 
+  public static final String MAX_CLUSTER_LEVEL_APPLICATION_PRIORITY =
+      YARN_PREFIX + "cluster.max-application-priority";
+
+  public static final int DEFAULT_CLUSTER_LEVEL_APPLICATION_PRIORITY = 0;
+
   @Private
   public static boolean isDistributedNodeLabelConfiguration(Configuration conf) {
     return DISTRIBUTED_NODELABEL_CONFIGURATION_TYPE.equals(conf.get(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39ca541/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index 2d9431d..6fd1838 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
@@ -329,14 +330,19 @@ public class RMAppManager implements EventHandler<RMAppManagerEvent>,
     ResourceRequest amReq =
         validateAndCreateResourceRequest(submissionContext, isRecovery);
 
+    // Verify and get the update application priority and set back to
+    // submissionContext
+    Priority appPriority = rmContext.getScheduler()
+        .checkAndGetApplicationPriority(submissionContext.getPriority(), user,
+            submissionContext.getQueue(), applicationId);
+    submissionContext.setPriority(appPriority);
+
     // Create RMApp
-    RMAppImpl application =
-        new RMAppImpl(applicationId, rmContext, this.conf,
-            submissionContext.getApplicationName(), user,
-            submissionContext.getQueue(),
-            submissionContext, this.scheduler, this.masterService,
-            submitTime, submissionContext.getApplicationType(),
-            submissionContext.getApplicationTags(), amReq);
+    RMAppImpl application = new RMAppImpl(applicationId, rmContext, this.conf,
+        submissionContext.getApplicationName(), user,
+        submissionContext.getQueue(), submissionContext, this.scheduler,
+        this.masterService, submitTime, submissionContext.getApplicationType(),
+        submissionContext.getApplicationTags(), amReq);
 
     // Concurrent app submissions with same applicationId will fail here
     // Concurrent app submissions with different applicationIds will not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39ca541/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index 62d5555..d480c24 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -924,17 +924,15 @@ public class RMAppImpl implements RMApp, Recoverable {
       // No existent attempts means the attempt associated with this app was not
       // started or started but not yet saved.
       if (app.attempts.isEmpty()) {
-        app.scheduler.handle(new AppAddedSchedulerEvent(app.applicationId,
-          app.submissionContext.getQueue(), app.user,
-          app.submissionContext.getReservationID()));
+        app.scheduler.handle(new AppAddedSchedulerEvent(app.user,
+            app.submissionContext, false));
         return RMAppState.SUBMITTED;
       }
 
       // Add application to scheduler synchronously to guarantee scheduler
       // knows applications before AM or NM re-registers.
-      app.scheduler.handle(new AppAddedSchedulerEvent(app.applicationId,
-        app.submissionContext.getQueue(), app.user, true,
-          app.submissionContext.getReservationID()));
+      app.scheduler.handle(new AppAddedSchedulerEvent(app.user,
+          app.submissionContext, true));
 
       // recover attempts
       app.recoverAppAttempts();
@@ -960,9 +958,8 @@ public class RMAppImpl implements RMApp, Recoverable {
       RMAppTransition {
     @Override
     public void transition(RMAppImpl app, RMAppEvent event) {
-      app.handler.handle(new AppAddedSchedulerEvent(app.applicationId,
-        app.submissionContext.getQueue(), app.user,
-        app.submissionContext.getReservationID()));
+      app.handler.handle(new AppAddedSchedulerEvent(app.user,
+          app.submissionContext, false));
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39ca541/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index aad76fd..094f77d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceOption;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
@@ -691,4 +692,13 @@ public abstract class AbstractYarnScheduler
     }
     return null;
   }
+
+  @Override
+  public Priority checkAndGetApplicationPriority(Priority priorityFromContext,
+      String user, String queueName, ApplicationId applicationId)
+      throws YarnException {
+    // Dummy Implementation till Application Priority changes are done in
+    // specific scheduler.
+    return Priority.newInstance(0);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39ca541/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Queue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Queue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Queue.java
index 02003c1..8646381 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Queue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Queue.java
@@ -24,6 +24,7 @@ import java.util.Set;
 import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
@@ -110,4 +111,11 @@ public interface Queue {
    *          new resource asked
    */
   public void decPendingResource(String nodeLabel, Resource resourceToDec);
+
+  /**
+   * Get the default application priority for this queue.
+   *
+   * @return default application priority
+   */
+  public Priority getDefaultApplicationPriority();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39ca541/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplication.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplication.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplication.java
index 2c788aa..519de98 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplication.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplication.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 
 @Private
@@ -28,10 +29,18 @@ public class SchedulerApplication<T extends SchedulerApplicationAttempt> {
   private Queue queue;
   private final String user;
   private T currentAttempt;
+  private volatile Priority priority;
 
   public SchedulerApplication(Queue queue, String user) {
     this.queue = queue;
     this.user = user;
+    this.priority = null;
+  }
+
+  public SchedulerApplication(Queue queue, String user, Priority priority) {
+    this.queue = queue;
+    this.user = user;
+    this.priority = priority;
   }
 
   public Queue getQueue() {
@@ -58,4 +67,17 @@ public class SchedulerApplication<T extends SchedulerApplicationAttempt> {
     queue.getMetrics().finishApp(user, rmAppFinalState);
   }
 
+  public Priority getPriority() {
+    return priority;
+  }
+
+  public void setPriority(Priority priority) {
+    this.priority = priority;
+
+    // Also set priority in current running attempt
+    if (null != currentAttempt) {
+      currentAttempt.setPriority(priority);
+    }
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39ca541/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index 475f2c7..cf543bd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -97,7 +97,9 @@ public class SchedulerApplicationAttempt implements SchedulableEntity {
   private boolean unmanagedAM = true;
   private boolean amRunning = false;
   private LogAggregationContext logAggregationContext;
-  
+
+  private Priority appPriority = null;
+
   protected ResourceUsage attemptResourceUsage = new ResourceUsage();
   private AtomicLong firstAllocationRequestSentTime = new AtomicLong(0);
   private AtomicLong firstContainerAllocatedTime = new AtomicLong(0);
@@ -726,7 +728,16 @@ public class SchedulerApplicationAttempt implements SchedulableEntity {
   public ResourceUsage getAppAttemptResourceUsage() {
     return this.attemptResourceUsage;
   }
-  
+
+  @Override
+  public Priority getPriority() {
+    return appPriority;
+  }
+
+  public void setPriority(Priority appPriority) {
+    this.appPriority = appPriority;
+  }
+
   @Override
   public String getId() {
     return getApplicationId().toString();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39ca541/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java
index b99b217..f629579 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
@@ -286,4 +287,23 @@ public interface YarnScheduler extends EventHandler<SchedulerEvent> {
    * @return an EnumSet containing the resource types
    */
   public EnumSet<SchedulerResourceTypes> getSchedulingResourceTypes();
+
+  /**
+   *
+   * Verify that a submitted application priority is valid for the configured
+   * queue, and return the effective priority to use.
+   *
+   * @param priorityFromContext
+   *          Submitted Application priority.
+   * @param user
+   *          User who submitted the Application
+   * @param queueName
+   *          Name of the Queue
+   * @param applicationId
+   *          Application ID
+   * @return the updated priority decided by the scheduler
+   */
+  public Priority checkAndGetApplicationPriority(Priority priorityFromContext,
+      String user, String queueName, ApplicationId applicationId)
+      throws YarnException;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39ca541/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index cd5bd8d..7f8e164 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -29,6 +29,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.QueueState;
@@ -574,4 +575,10 @@ public abstract class AbstractCSQueue implements CSQueue {
     // sorry, you cannot access
     return false;
   }
+
+  @Override
+  public Priority getDefaultApplicationPriority() {
+    // TODO: dummy default; LeafQueue overrides with its configured value.
+    return null;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39ca541/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 559dfc6..5a20f8b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
@@ -159,6 +160,9 @@ public class CapacityScheduler extends
     new Comparator<FiCaSchedulerApp>() {
     @Override
     public int compare(FiCaSchedulerApp a1, FiCaSchedulerApp a2) {
+      if (!a1.getPriority().equals(a2.getPriority())) {
+        return a1.getPriority().compareTo(a2.getPriority());
+      }
       return a1.getApplicationId().compareTo(a2.getApplicationId());
     }
   };
@@ -226,6 +230,7 @@ public class CapacityScheduler extends
   private RMNodeLabelsManager labelManager;
   private SchedulerHealth schedulerHealth = new SchedulerHealth();
   long lastNodeUpdateTime;
+  private Priority maxClusterLevelAppPriority;
   /**
    * EXPERT
    */
@@ -326,6 +331,9 @@ public class CapacityScheduler extends
     if (scheduleAsynchronously) {
       asyncSchedulerThread = new AsyncScheduleThread(this);
     }
+    maxClusterLevelAppPriority = Priority.newInstance(yarnConf.getInt(
+        YarnConfiguration.MAX_CLUSTER_LEVEL_APPLICATION_PRIORITY,
+        YarnConfiguration.DEFAULT_CLUSTER_LEVEL_APPLICATION_PRIORITY));
 
     LOG.info("Initialized CapacityScheduler with " +
         "calculator=" + getResourceCalculator().getClass() + ", " +
@@ -692,7 +700,7 @@ public class CapacityScheduler extends
   }
 
   private synchronized void addApplication(ApplicationId applicationId,
-    String queueName, String user, boolean isAppRecovering) {
+      String queueName, String user, boolean isAppRecovering, Priority priority) {
 
     if (mappings != null && mappings.size() > 0) {
       try {
@@ -761,7 +769,7 @@ public class CapacityScheduler extends
     // update the metrics
     queue.getMetrics().submitApp(user);
     SchedulerApplication<FiCaSchedulerApp> application =
-        new SchedulerApplication<FiCaSchedulerApp>(queue, user);
+        new SchedulerApplication<FiCaSchedulerApp>(queue, user, priority);
     applications.put(applicationId, application);
     LOG.info("Accepted application " + applicationId + " from user: " + user
         + ", in queue: " + queueName);
@@ -783,9 +791,9 @@ public class CapacityScheduler extends
         applications.get(applicationAttemptId.getApplicationId());
     CSQueue queue = (CSQueue) application.getQueue();
 
-    FiCaSchedulerApp attempt =
-        new FiCaSchedulerApp(applicationAttemptId, application.getUser(),
-          queue, queue.getActiveUsersManager(), rmContext);
+    FiCaSchedulerApp attempt = new FiCaSchedulerApp(applicationAttemptId,
+        application.getUser(), queue, queue.getActiveUsersManager(), rmContext,
+        application.getPriority());
     if (transferStateFromPreviousAttempt) {
       attempt.transferStateFromPreviousAttempt(application
         .getCurrentAppAttempt());
@@ -1307,7 +1315,8 @@ public class CapacityScheduler extends
         addApplication(appAddedEvent.getApplicationId(),
             queueName,
             appAddedEvent.getUser(),
-            appAddedEvent.getIsAppRecovering());
+            appAddedEvent.getIsAppRecovering(),
+            appAddedEvent.getApplicationPriority());
       }
     }
     break;
@@ -1833,4 +1842,56 @@ public class CapacityScheduler extends
   private synchronized void setLastNodeUpdateTime(long time) {
     this.lastNodeUpdateTime = time;
   }
+
+  @Override
+  public Priority checkAndGetApplicationPriority(Priority priorityFromContext,
+      String user, String queueName, ApplicationId applicationId)
+      throws YarnException {
+    Priority appPriority = null;
+
+    // ToDo: Verify against priority ACLs
+
+    // Handle the case where no priority was provided in the submissionContext.
+    if (null == priorityFromContext) {
+      // Get the default priority for the queue. If the queue does not exist,
+      // fall back to the configured default application priority.
+      priorityFromContext = getDefaultPriorityForQueue(queueName);
+
+      LOG.info("Application '" + applicationId
+          + "' is submitted without priority "
+          + "hence considering default queue/cluster priority:"
+          + priorityFromContext.getPriority());
+    }
+
+    // Verify that the submitted priority does not exceed the maximum
+    // application priority in the cluster; if it does, cap it at the maximum.
+    if (priorityFromContext.compareTo(getMaxClusterLevelAppPriority()) < 0) {
+      priorityFromContext = Priority
+          .newInstance(getMaxClusterLevelAppPriority().getPriority());
+    }
+
+    appPriority = priorityFromContext;
+
+    LOG.info("Priority '" + appPriority.getPriority()
+        + "' is acceptable in queue :" + queueName + "for application:"
+        + applicationId + "for the user: " + user);
+
+    return appPriority;
+  }
+
+  private Priority getDefaultPriorityForQueue(String queueName) {
+    Queue queue = getQueue(queueName);
+    if (null == queue) {
+      // Return with default application priority
+      return Priority.newInstance(CapacitySchedulerConfiguration
+          .DEFAULT_CONFIGURATION_APPLICATION_PRIORITY);
+    }
+
+    return Priority.newInstance(queue.getDefaultApplicationPriority()
+        .getPriority());
+  }
+
+  public Priority getMaxClusterLevelAppPriority() {
+    return maxClusterLevelAppPriority;
+  }
 }

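Note the direction of the bounds check in checkAndGetApplicationPriority above: Priority.compareTo is inverted (a numerically higher priority compares as smaller, consistent with the application comparator earlier in this file that puts the higher priority first), so compareTo(...) < 0 means the submitted value exceeds the cluster maximum and is capped. A worked sketch under that assumption, with illustrative values:

    import org.apache.hadoop.yarn.api.records.Priority;

    Priority submitted = Priority.newInstance(12);  // illustrative
    Priority clusterMax = Priority.newInstance(10); // illustrative
    // Inverted compareTo: 10 - 12 = -2 < 0, i.e. submitted exceeds the max,
    // so the scheduler substitutes the cluster maximum.
    if (submitted.compareTo(clusterMax) < 0) {
      submitted = Priority.newInstance(clusterMax.getPriority());
    }
    System.out.println(submitted.getPriority()); // prints 10
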
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39ca541/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index 563643c..be5e6dd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -207,6 +207,12 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur
   public static final String QUEUE_PREEMPTION_DISABLED = "disable_preemption";
 
   @Private
+  public static final String DEFAULT_APPLICATION_PRIORITY = "default-application-priority";
+
+  @Private
+  public static final Integer DEFAULT_CONFIGURATION_APPLICATION_PRIORITY = 0;
+
+  @Private
   public static class QueueMapping {
 
     public enum MappingType {
@@ -947,4 +953,11 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur
     
     return configuredNodeLabels;
   }
+
+  public Integer getDefaultApplicationPriorityConfPerQueue(String queue) {
+    Integer defaultPriority = getInt(getQueuePrefix(queue)
+        + DEFAULT_APPLICATION_PRIORITY,
+        DEFAULT_CONFIGURATION_APPLICATION_PRIORITY);
+    return defaultPriority;
+  }
 }

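Putting the two configuration knobs together: the per-queue key resolved by getDefaultApplicationPriorityConfPerQueue is getQueuePrefix(queue) + "default-application-priority", i.e. yarn.scheduler.capacity.<queue-path>.default-application-priority, while the cluster-wide cap is read from YarnConfiguration.MAX_CLUSTER_LEVEL_APPLICATION_PRIORITY when the CapacityScheduler initializes. A sketch of setting both programmatically (the queue path root.default and the values are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    Configuration conf = new Configuration();
    // Per-queue default application priority (queue path is illustrative):
    conf.setInt(
        "yarn.scheduler.capacity.root.default.default-application-priority", 3);
    // Cluster-wide maximum consulted by checkAndGetApplicationPriority:
    conf.setInt(YarnConfiguration.MAX_CLUSTER_LEVEL_APPLICATION_PRIORITY, 10);
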
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39ca541/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index 598f279..0ce4d68 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -95,9 +95,11 @@ public class LeafQueue extends AbstractCSQueue {
   
   private int nodeLocalityDelay;
 
-  Map<ApplicationAttemptId, FiCaSchedulerApp> applicationAttemptMap = 
+  Map<ApplicationAttemptId, FiCaSchedulerApp> applicationAttemptMap =
       new HashMap<ApplicationAttemptId, FiCaSchedulerApp>();
-  
+
+  private Priority defaultAppPriorityPerQueue;
+
   Set<FiCaSchedulerApp> pendingApplications;
   
   private float minimumAllocationFactor;
@@ -220,6 +222,9 @@ public class LeafQueue extends AbstractCSQueue {
       }
     }
 
+    defaultAppPriorityPerQueue = Priority.newInstance(conf
+        .getDefaultApplicationPriorityConfPerQueue(getQueuePath()));
+
     LOG.info("Initializing " + queueName + "\n" +
         "capacity = " + queueCapacities.getCapacity() +
         " [= (float) configuredCapacity / 100 ]" + "\n" + 
@@ -265,7 +270,8 @@ public class LeafQueue extends AbstractCSQueue {
         "nodeLocalityDelay = " +  nodeLocalityDelay + "\n" +
         "reservationsContinueLooking = " +
         reservationsContinueLooking + "\n" +
-        "preemptionDisabled = " + getPreemptionDisabled() + "\n");
+        "preemptionDisabled = " + getPreemptionDisabled() + "\n" +
+        "defaultAppPriorityPerQueue = " + defaultAppPriorityPerQueue);
   }
 
   @Override
@@ -2060,7 +2066,12 @@ public class LeafQueue extends AbstractCSQueue {
      );
     this.orderingPolicy = orderingPolicy;
   }
-  
+
+  @Override
+  public Priority getDefaultApplicationPriority() {
+    return defaultAppPriorityPerQueue;
+  }
+
   /*
    * Holds shared values used by all applications in
    * the queue to calculate headroom on demand

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39ca541/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
index 3085d93..dfeb30f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
@@ -72,6 +72,13 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
   public FiCaSchedulerApp(ApplicationAttemptId applicationAttemptId, 
       String user, Queue queue, ActiveUsersManager activeUsersManager,
       RMContext rmContext) {
+    this(applicationAttemptId, user, queue, activeUsersManager, rmContext,
+        Priority.newInstance(0));
+  }
+
+  public FiCaSchedulerApp(ApplicationAttemptId applicationAttemptId,
+      String user, Queue queue, ActiveUsersManager activeUsersManager,
+      RMContext rmContext, Priority appPriority) {
     super(applicationAttemptId, user, queue, activeUsersManager, rmContext);
     
     RMApp rmApp = rmContext.getRMApps().get(getApplicationId());
@@ -87,6 +94,7 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
     }
     
     setAMResource(amResource);
+    setPriority(appPriority);
   }
 
   synchronized public boolean containerCompleted(RMContainer rmContainer,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39ca541/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AppAddedSchedulerEvent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AppAddedSchedulerEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AppAddedSchedulerEvent.java
index a54e4bf..89d2f66 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AppAddedSchedulerEvent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AppAddedSchedulerEvent.java
@@ -19,6 +19,8 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.event;
 
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.ReservationId;
 
 public class AppAddedSchedulerEvent extends SchedulerEvent {
@@ -28,25 +30,35 @@ public class AppAddedSchedulerEvent extends SchedulerEvent {
   private final String user;
   private final ReservationId reservationID;
   private final boolean isAppRecovering;
+  private final Priority appPriority;
 
-  public AppAddedSchedulerEvent(
-      ApplicationId applicationId, String queue, String user) {
-    this(applicationId, queue, user, false, null);
+  public AppAddedSchedulerEvent(ApplicationId applicationId, String queue,
+      String user) {
+    this(applicationId, queue, user, false, null, Priority.newInstance(0));
   }
 
   public AppAddedSchedulerEvent(ApplicationId applicationId, String queue,
-      String user, ReservationId reservationID) {
-    this(applicationId, queue, user, false, reservationID);
+      String user, ReservationId reservationID, Priority appPriority) {
+    this(applicationId, queue, user, false, reservationID, appPriority);
+  }
+
+  public AppAddedSchedulerEvent(String user,
+      ApplicationSubmissionContext submissionContext, boolean isAppRecovering) {
+    this(submissionContext.getApplicationId(), submissionContext.getQueue(),
+        user, isAppRecovering, submissionContext.getReservationID(),
+        submissionContext.getPriority());
   }
 
   public AppAddedSchedulerEvent(ApplicationId applicationId, String queue,
-      String user, boolean isAppRecovering, ReservationId reservationID) {
+      String user, boolean isAppRecovering, ReservationId reservationID,
+      Priority appPriority) {
     super(SchedulerEventType.APP_ADDED);
     this.applicationId = applicationId;
     this.queue = queue;
     this.user = user;
     this.reservationID = reservationID;
     this.isAppRecovering = isAppRecovering;
+    this.appPriority = appPriority;
   }
 
   public ApplicationId getApplicationId() {
@@ -68,4 +80,8 @@ public class AppAddedSchedulerEvent extends SchedulerEvent {
   public ReservationId getReservationID() {
     return reservationID;
   }
+
+  public Priority getApplicationPriority() {
+    return appPriority;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39ca541/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
index e488c76..713bdca 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
@@ -331,6 +331,12 @@ public abstract class FSQueue implements Queue, Schedulable {
   public void decPendingResource(String nodeLabel, Resource resourceToDec) {
   }
 
+  @Override
+  public Priority getDefaultApplicationPriority() {
+    // TODO: add an implementation for the fair scheduler queues.
+    return null;
+  }
+
   public boolean fitsInMaxShare(Resource additionalResource) {
     Resource usagePlusAddition =
         Resources.add(getResourceUsage(), additionalResource);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39ca541/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
index e66c02c..6b77ceb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
@@ -210,6 +210,12 @@ public class FifoScheduler extends
     @Override
     public void decPendingResource(String nodeLabel, Resource resourceToDec) {
     }
+
+    @Override
+    public Priority getDefaultApplicationPriority() {
+      // TODO: add an implementation for the FIFO scheduler.
+      return null;
+    }
   };
 
   public FifoScheduler() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39ca541/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FifoComparator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FifoComparator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FifoComparator.java
index b92b264..1045386 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FifoComparator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FifoComparator.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy;
 
 import java.util.*;
+
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.*;
 
@@ -29,9 +30,13 @@ public class FifoComparator
     implements Comparator<SchedulableEntity> {
       
     @Override
-    public int compare(SchedulableEntity r1, SchedulableEntity r2) {
-      int res = r1.compareInputOrderTo(r2);
-      return res;
+  public int compare(SchedulableEntity r1, SchedulableEntity r2) {
+    if (r1.getPriority() != null
+        && !r1.getPriority().equals(r2.getPriority())) {
+      return r1.getPriority().compareTo(r2.getPriority());
     }
+    int res = r1.compareInputOrderTo(r2);
+    return res;
+  }
 }
 

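With this change the FIFO ordering policy becomes priority-aware: entities with unequal priorities are ordered by priority first (numerically higher ahead, given the inverted Priority.compareTo noted above), and compareInputOrderTo keeps plain FIFO order as the tiebreaker, or whenever the first entity carries no priority. A small sketch of the comparison, with illustrative values:

    import org.apache.hadoop.yarn.api.records.Priority;

    Priority p8 = Priority.newInstance(8);
    Priority p5 = Priority.newInstance(5);
    // p8.compareTo(p5) == 5 - 8 == -3 < 0, so a priority-8 entity sorts ahead
    // of a priority-5 one; equal priorities fall through to
    // compareInputOrderTo(...) and preserve submission order.
    System.out.println(p8.compareTo(p5)); // prints -3
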
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39ca541/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/SchedulableEntity.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/SchedulableEntity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/SchedulableEntity.java
index 9b9d73d..2ccb1cd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/SchedulableEntity.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/SchedulableEntity.java
@@ -48,4 +48,9 @@ public interface SchedulableEntity {
    */
   public ResourceUsage getSchedulingResourceUsage();
   
+  /**
+   * Get the priority of the application.
+   */
+  public Priority getPriority();
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39ca541/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
index d068a94..5080355 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.LogAggregationContext;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -289,6 +290,15 @@ public class MockRM extends ResourceManager {
     return submitApp(masterMemory, false);
   }
 
+  public RMApp submitApp(int masterMemory, Priority priority) throws Exception {
+    Resource resource = Resource.newInstance(masterMemory, 0);
+    return submitApp(resource, "", UserGroupInformation.getCurrentUser()
+        .getShortUserName(), null, false, null,
+        super.getConfig().getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
+            YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS), null, null, true,
+        false, false, null, 0, null, true, priority);
+  }
+
   public RMApp submitApp(int masterMemory, boolean unmanaged)
       throws Exception {
     return submitApp(masterMemory, "", UserGroupInformation.getCurrentUser()
@@ -327,7 +337,7 @@ public class MockRM extends ResourceManager {
     return submitApp(resource, name, user, acls, false, queue,
         super.getConfig().getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
           YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS), null, null,
-          true, false, false, null, 0, null, true);
+          true, false, false, null, 0, null, true, null);
   }
 
   public RMApp submitApp(int masterMemory, String name, String user,
@@ -370,18 +380,19 @@ public class MockRM extends ResourceManager {
     resource.setMemory(masterMemory);
     return submitApp(resource, name, user, acls, unmanaged, queue,
         maxAppAttempts, ts, appType, waitForAccepted, keepContainers,
-        false, null, 0, null, true);
+        false, null, 0, null, true, Priority.newInstance(0));
   }
 
   public RMApp submitApp(int masterMemory, long attemptFailuresValidityInterval)
       throws Exception {
     Resource resource = Records.newRecord(Resource.class);
     resource.setMemory(masterMemory);
+    Priority priority = Priority.newInstance(0);
     return submitApp(resource, "", UserGroupInformation.getCurrentUser()
       .getShortUserName(), null, false, null,
       super.getConfig().getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
       YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS), null, null, true, false,
-      false, null, attemptFailuresValidityInterval, null, true);
+      false, null, attemptFailuresValidityInterval, null, true, priority);
   }
 
   public RMApp submitApp(int masterMemory, String name, String user,
@@ -391,20 +402,22 @@ public class MockRM extends ResourceManager {
       ApplicationId applicationId) throws Exception {
     Resource resource = Records.newRecord(Resource.class);
     resource.setMemory(masterMemory);
+    Priority priority = Priority.newInstance(0);
     return submitApp(resource, name, user, acls, unmanaged, queue,
       maxAppAttempts, ts, appType, waitForAccepted, keepContainers,
-      isAppIdProvided, applicationId, 0, null, true);
+      isAppIdProvided, applicationId, 0, null, true, priority);
   }
 
   public RMApp submitApp(int masterMemory,
       LogAggregationContext logAggregationContext) throws Exception {
     Resource resource = Records.newRecord(Resource.class);
     resource.setMemory(masterMemory);
+    Priority priority = Priority.newInstance(0);
     return submitApp(resource, "", UserGroupInformation.getCurrentUser()
       .getShortUserName(), null, false, null,
       super.getConfig().getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
       YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS), null, null, true, false,
-      false, null, 0, logAggregationContext, true);
+      false, null, 0, logAggregationContext, true, priority);
    }
 
   public RMApp submitApp(Resource capability, String name, String user,
@@ -412,7 +425,8 @@ public class MockRM extends ResourceManager {
       int maxAppAttempts, Credentials ts, String appType,
       boolean waitForAccepted, boolean keepContainers, boolean isAppIdProvided,
       ApplicationId applicationId, long attemptFailuresValidityInterval,
-      LogAggregationContext logAggregationContext, boolean cancelTokensWhenComplete)
+      LogAggregationContext logAggregationContext,
+      boolean cancelTokensWhenComplete, Priority priority)
       throws Exception {
     ApplicationId appId = isAppIdProvided ? applicationId : null;
     ApplicationClientProtocol client = getClientRMService();
@@ -429,12 +443,15 @@ public class MockRM extends ResourceManager {
     sub.setApplicationId(appId);
     sub.setApplicationName(name);
     sub.setMaxAppAttempts(maxAppAttempts);
-    if(unmanaged) {
+    if (unmanaged) {
       sub.setUnmanagedAM(true);
     }
     if (queue != null) {
       sub.setQueue(queue);
     }
+    if (priority != null) {
+      sub.setPriority(priority);
+    }
     sub.setApplicationType(appType);
     ContainerLaunchContext clc = Records
         .newRecord(ContainerLaunchContext.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39ca541/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
index 3db8b7c..f073763 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
@@ -219,6 +219,7 @@ public class TestAppManager{
 
     rmContext = mockRMContext(1, now - 10);
     ResourceScheduler scheduler = mockResourceScheduler();
+    ((RMContextImpl)rmContext).setScheduler(scheduler);
     Configuration conf = new Configuration();
     ApplicationMasterService masterService =
         new ApplicationMasterService(rmContext, scheduler);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39ca541/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
index 32743c9..b556335 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
@@ -1056,7 +1056,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
     RMApp app0 = rm1.submitApp(resource, "", UserGroupInformation
         .getCurrentUser().getShortUserName(), null, false, null,
         YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS, null, null, true, true,
-        false, null, 0, null, true);
+        false, null, 0, null, true, null);
     MockAM am0 = MockRM.launchAndRegisterAM(app0, rm1, nm1);
 
     am0.allocate("127.0.0.1", 1000, 2, new ArrayList<ContainerId>());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39ca541/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForNodePartitions.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForNodePartitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForNodePartitions.java
index b3ac79b..d6f64bf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForNodePartitions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForNodePartitions.java
@@ -1025,6 +1025,7 @@ public class TestProportionalCapacityPreemptionPolicyForNodePartitions {
       when(app.getReservedContainers()).thenReturn(reservedContainers);
       when(app.getApplicationAttemptId()).thenReturn(appAttemptId);
       when(app.getApplicationId()).thenReturn(appId);
+      when(app.getPriority()).thenReturn(Priority.newInstance(0));
 
       // add to LeafQueue
       LeafQueue queue = (LeafQueue) nameToCSQueues.get(queueName);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39ca541/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java
index 484090d..1afebb6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java
@@ -155,7 +155,8 @@ public class TestApplicationLimits {
     doReturn(applicationAttemptId). when(application).getApplicationAttemptId();
     doReturn(user).when(application).getUser();
     doReturn(amResource).when(application).getAMResource();
-    when(application.compareInputOrderTo(any(FiCaSchedulerApp.class))).thenCallRealMethod(); 
+    doReturn(Priority.newInstance(0)).when(application).getPriority();
+    when(application.compareInputOrderTo(any(FiCaSchedulerApp.class))).thenCallRealMethod();
     return application;
   }
   
@@ -175,7 +176,7 @@ public class TestApplicationLimits {
     
     ActiveUsersManager activeUsersManager = mock(ActiveUsersManager.class);
     when(queue.getActiveUsersManager()).thenReturn(activeUsersManager);
-    
+
     assertEquals(Resource.newInstance(8 * GB, 1), queue.getAMResourceLimit());
     assertEquals(Resource.newInstance(4 * GB, 1),
       queue.getUserAMResourceLimit());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39ca541/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java
new file mode 100644
index 0000000..80eff06
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java
@@ -0,0 +1,345 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.ApplicationIdNotProvidedException;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestApplicationPriority {
+  private static final Log LOG = LogFactory
+      .getLog(TestApplicationPriority.class);
+  private final int GB = 1024;
+
+  private YarnConfiguration conf;
+
+  @Before
+  public void setUp() throws Exception {
+    conf = new YarnConfiguration();
+    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+        ResourceScheduler.class);
+  }
+
+  @Test
+  public void testApplicationOrderingWithPriority() throws Exception {
+
+    Configuration conf = new Configuration();
+    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+        ResourceScheduler.class);
+    MockRM rm = new MockRM(conf);
+    rm.start();
+    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
+
+    LeafQueue q = (LeafQueue) cs.getQueue("default");
+    Assert.assertNotNull(q);
+
+    String host = "127.0.0.1";
+    RMNode node = MockNodes.newNodeInfo(0, MockNodes.newResource(16 * GB), 1,
+        host);
+    cs.handle(new NodeAddedSchedulerEvent(node));
+
+    // add app1 start
+    ApplicationId appId1 = BuilderUtils.newApplicationId(100, 1);
+    ApplicationAttemptId appAttemptId1 = BuilderUtils.newApplicationAttemptId(
+        appId1, 1);
+
+    RMAppAttemptMetrics attemptMetric1 = new RMAppAttemptMetrics(appAttemptId1,
+        rm.getRMContext());
+    RMAppImpl app1 = mock(RMAppImpl.class);
+    when(app1.getApplicationId()).thenReturn(appId1);
+    RMAppAttemptImpl attempt1 = mock(RMAppAttemptImpl.class);
+    when(attempt1.getAppAttemptId()).thenReturn(appAttemptId1);
+    when(attempt1.getRMAppAttemptMetrics()).thenReturn(attemptMetric1);
+    when(app1.getCurrentAppAttempt()).thenReturn(attempt1);
+
+    rm.getRMContext().getRMApps().put(appId1, app1);
+
+    SchedulerEvent addAppEvent1 = new AppAddedSchedulerEvent(appId1, "default",
+        "user", null, Priority.newInstance(5));
+    cs.handle(addAppEvent1);
+    SchedulerEvent addAttemptEvent1 = new AppAttemptAddedSchedulerEvent(
+        appAttemptId1, false);
+    cs.handle(addAttemptEvent1);
+    // add app1 end
+
+    // add app2 begin
+    ApplicationId appId2 = BuilderUtils.newApplicationId(100, 2);
+    ApplicationAttemptId appAttemptId2 = BuilderUtils.newApplicationAttemptId(
+        appId2, 1);
+
+    RMAppAttemptMetrics attemptMetric2 = new RMAppAttemptMetrics(appAttemptId2,
+        rm.getRMContext());
+    RMAppImpl app2 = mock(RMAppImpl.class);
+    when(app2.getApplicationId()).thenReturn(appId2);
+    RMAppAttemptImpl attempt2 = mock(RMAppAttemptImpl.class);
+    when(attempt2.getAppAttemptId()).thenReturn(appAttemptId2);
+    when(attempt2.getRMAppAttemptMetrics()).thenReturn(attemptMetric2);
+    when(app2.getCurrentAppAttempt()).thenReturn(attempt2);
+
+    rm.getRMContext().getRMApps().put(appId2, app2);
+
+    SchedulerEvent addAppEvent2 = new AppAddedSchedulerEvent(appId2, "default",
+        "user", null, Priority.newInstance(8));
+    cs.handle(addAppEvent2);
+    SchedulerEvent addAttemptEvent2 = new AppAttemptAddedSchedulerEvent(
+        appAttemptId2, false);
+    cs.handle(addAttemptEvent2);
+    // add app end
+
+    // Now, the first assignment will be for app2 since app2 has the highest
+    // priority
+    assertEquals(q.getApplications().size(), 2);
+    assertEquals(q.getApplications().iterator().next()
+        .getApplicationAttemptId(), appAttemptId2);
+
+    rm.stop();
+  }
+
+  @Test
+  public void testApplicationPriorityAllocation() throws Exception {
+
+    Configuration conf = new Configuration();
+    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+        ResourceScheduler.class);
+    // Set Max Application Priority as 10
+    conf.setInt(YarnConfiguration.MAX_CLUSTER_LEVEL_APPLICATION_PRIORITY, 10);
+    MockRM rm = new MockRM(conf);
+    rm.start();
+
+    Priority appPriority1 = Priority.newInstance(5);
+    MockNM nm1 = rm.registerNode("127.0.0.1:1234", 16 * GB);
+    RMApp app1 = rm.submitApp(1 * GB, appPriority1);
+
+    // kick the scheduler, 1 GB given to AM1, remaining 15GB on nm1
+    MockAM am1 = MockRM.launchAM(app1, rm, nm1);
+    am1.registerAppAttempt();
+
+    // add request for containers
+    am1.addRequests(new String[]{"127.0.0.1", "127.0.0.2"}, 2 * GB, 1, 7);
+    AllocateResponse alloc1Response = am1.schedule(); // send the request
+
+    // kick the scheduler, 7 containers will be allocated for App1
+    nm1.nodeHeartbeat(true);
+    while (alloc1Response.getAllocatedContainers().size() < 1) {
+      LOG.info("Waiting for containers to be created for app 1...");
+      Thread.sleep(100);
+      alloc1Response = am1.schedule();
+    }
+
+    List<Container> allocated1 = alloc1Response.getAllocatedContainers();
+    Assert.assertEquals(7, allocated1.size());
+    Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemory());
+
+    // check node report, 15 GB used (1 AM and 7 containers) and 1 GB available
+    SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport(
+        nm1.getNodeId());
+    Assert.assertEquals(15 * GB, report_nm1.getUsedResource().getMemory());
+    Assert.assertEquals(1 * GB, report_nm1.getAvailableResource().getMemory());
+
+    // Submit the second app App2 with priority 8 (Higher than App1)
+    Priority appPriority2 = Priority.newInstance(8);
+    RMApp app2 = rm.submitApp(1 * GB, appPriority2);
+
+    // kick the scheduler, 1 GB which was free is given to AM of App2
+    nm1.nodeHeartbeat(true);
+    MockAM am2 = rm.sendAMLaunched(app2.getCurrentAppAttempt()
+        .getAppAttemptId());
+    am2.registerAppAttempt();
+
+    // check node report, 16 GB used and 0 GB available
+    report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
+    Assert.assertEquals(16 * GB, report_nm1.getUsedResource().getMemory());
+    Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory());
+
+    // get scheduler
+    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
+
+    // get scheduler app
+    FiCaSchedulerApp schedulerAppAttempt = cs.getSchedulerApplications()
+        .get(app1.getApplicationId()).getCurrentAppAttempt();
+
+    // kill 2 containers to free up some space
+    int counter = 0;
+    for (Container c : allocated1) {
+      if (++counter > 2) {
+        break;
+      }
+      cs.killContainer(schedulerAppAttempt.getRMContainer(c.getId()));
+    }
+
+    // check node report, 12 GB used and 4 GB available
+    report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
+    Assert.assertEquals(12 * GB, report_nm1.getUsedResource().getMemory());
+    Assert.assertEquals(4 * GB, report_nm1.getAvailableResource().getMemory());
+
+    // add request for containers App1
+    am1.addRequests(new String[]{"127.0.0.1", "127.0.0.2"}, 2 * GB, 1, 10);
+    am1.schedule(); // send the request for App1
+
+    // add request for containers App2
+    am2.addRequests(new String[]{"127.0.0.1", "127.0.0.2"}, 2 * GB, 1, 3);
+    AllocateResponse alloc1Response4 = am2.schedule(); // send the request
+
+    // kick the scheduler, since App2 priority is more than App1, it will get
+    // remaining cluster space.
+    nm1.nodeHeartbeat(true);
+    while (alloc1Response4.getAllocatedContainers().size() < 1) {
+      LOG.info("Waiting for containers to be created for app 2...");
+      Thread.sleep(100);
+      alloc1Response4 = am2.schedule();
+    }
+
+    // check node report, 16 GB used and 0 GB available
+    report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
+    Assert.assertEquals(16 * GB, report_nm1.getUsedResource().getMemory());
+    Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory());
+
+    rm.stop();
+  }
+
+  @Test
+  public void testPriorityWithPendingApplications() throws Exception {
+
+    Configuration conf = new Configuration();
+    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+        ResourceScheduler.class);
+    // Set Max Application Priority as 10
+    conf.setInt(YarnConfiguration.MAX_CLUSTER_LEVEL_APPLICATION_PRIORITY, 10);
+    MockRM rm = new MockRM(conf);
+    rm.start();
+
+    Priority appPriority1 = Priority.newInstance(5);
+    MockNM nm1 = rm.registerNode("127.0.0.1:1234", 8 * GB);
+    RMApp app1 = rm.submitApp(1 * GB, appPriority1);
+
+    // kick the scheduler, 1 GB given to AM1, remaining 7GB on nm1
+    MockAM am1 = MockRM.launchAM(app1, rm, nm1);
+    am1.registerAppAttempt();
+
+    // add request for containers
+    am1.addRequests(new String[]{"127.0.0.1", "127.0.0.2"}, 1 * GB, 1, 7);
+    AllocateResponse alloc1Response = am1.schedule(); // send the request
+
+    // kick the scheduler, 7 containers will be allocated for App1
+    nm1.nodeHeartbeat(true);
+    while (alloc1Response.getAllocatedContainers().size() < 1) {
+      LOG.info("Waiting for containers to be created for app 1...");
+      Thread.sleep(100);
+      alloc1Response = am1.schedule();
+    }
+
+    List<Container> allocated1 = alloc1Response.getAllocatedContainers();
+    Assert.assertEquals(7, allocated1.size());
+    Assert.assertEquals(1 * GB, allocated1.get(0).getResource().getMemory());
+
+    // check node report, 8 GB used (1 AM and 7 containers) and 0 GB available
+    SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport(
+        nm1.getNodeId());
+    Assert.assertEquals(8 * GB, report_nm1.getUsedResource().getMemory());
+    Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory());
+
+    // Submit the second app App2 with priority 7
+    Priority appPriority2 = Priority.newInstance(7);
+    RMApp app2 = rm.submitApp(1 * GB, appPriority2);
+
+    // Submit the third app App3 with priority 8
+    Priority appPriority3 = Priority.newInstance(8);
+    RMApp app3 = rm.submitApp(1 * GB, appPriority3);
+
+    // Submit the fourth app App4 with priority 6
+    Priority appPriority4 = Priority.newInstance(6);
+    RMApp app4 = rm.submitApp(1 * GB, appPriority4);
+
+    // Only one app can run as the AM resource limit restricts it. Kill app1;
+    // if app3 (highest priority among the rest) gets active, it indicates that
+    // priority is working with pendingApplications.
+    rm.killApp(app1.getApplicationId());
+
+    // kick the scheduler, app3 (highest among pending) gets the free space
+    nm1.nodeHeartbeat(true);
+    MockAM am3 = rm.sendAMLaunched(app3.getCurrentAppAttempt()
+        .getAppAttemptId());
+    am3.registerAppAttempt();
+
+    // check node report, 1 GB used and 7 GB available
+    report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
+    Assert.assertEquals(1 * GB, report_nm1.getUsedResource().getMemory());
+    Assert.assertEquals(7 * GB, report_nm1.getAvailableResource().getMemory());
+
+    rm.stop();
+  }
+
+  @Test
+  public void testMaxPriorityValidation() throws Exception {
+
+    Configuration conf = new Configuration();
+    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+        ResourceScheduler.class);
+    // Set Max Application Priority as 10
+    conf.setInt(YarnConfiguration.MAX_CLUSTER_LEVEL_APPLICATION_PRIORITY, 10);
+    Priority maxPriority = Priority.newInstance(10);
+    MockRM rm = new MockRM(conf);
+    rm.start();
+
+    Priority appPriority1 = Priority.newInstance(15);
+    rm.registerNode("127.0.0.1:1234", 8 * GB);
+    RMApp app1 = rm.submitApp(1 * GB, appPriority1);
+
+    // Application submission should be successful and verify priority
+    Assert.assertEquals(app1.getApplicationSubmissionContext().getPriority(),
+        maxPriority);
+    rm.stop();
+  }
+}
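
For context: the tests above drive application priority through the ApplicationSubmissionContext. A minimal client-side sketch of the same idea, assuming a running cluster with the CapacityScheduler and the default queue (the application name, queue, and priority value are illustrative, not taken from this patch):

    import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
    import org.apache.hadoop.yarn.api.records.Priority;
    import org.apache.hadoop.yarn.client.api.YarnClient;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class PriorityDemo {
      public static void main(String[] args) throws Exception {
        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(new YarnConfiguration());
        yarnClient.start();

        // Ask the RM for a new application id and fill in its submission context.
        ApplicationSubmissionContext appContext =
            yarnClient.createApplication().getApplicationSubmissionContext();
        appContext.setApplicationName("priority-demo"); // illustrative name
        appContext.setQueue("default");
        // Higher value means higher priority; the RM caps it at
        // yarn.cluster.max-application-priority, as testMaxPriorityValidation shows.
        appContext.setPriority(Priority.newInstance(8));

        // A complete submission would also set the AM container launch context
        // and resource before calling:
        // yarnClient.submitApplication(appContext);
        yarnClient.stop();
      }
    }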

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39ca541/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index e8afab2..a8bbac3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -129,6 +129,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedule
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerQueueInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerQueueInfoList;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.FairOrderingPolicy;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.FifoOrderingPolicy;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
@@ -898,13 +899,17 @@ public class TestCapacityScheduler {
       ApplicationId id1 = ApplicationId.newInstance(1, 1);
       ApplicationId id2 = ApplicationId.newInstance(1, 2);
       ApplicationId id3 = ApplicationId.newInstance(2, 1);
+      Priority priority = Priority.newInstance(0);
       //same clusterId
       FiCaSchedulerApp app1 = Mockito.mock(FiCaSchedulerApp.class);
       when(app1.getApplicationId()).thenReturn(id1);
+      when(app1.getPriority()).thenReturn(priority);
       FiCaSchedulerApp app2 = Mockito.mock(FiCaSchedulerApp.class);
       when(app2.getApplicationId()).thenReturn(id2);
+      when(app2.getPriority()).thenReturn(priority);
       FiCaSchedulerApp app3 = Mockito.mock(FiCaSchedulerApp.class);
       when(app3.getApplicationId()).thenReturn(id3);
+      when(app3.getPriority()).thenReturn(priority);
       assertTrue(appComparator.compare(app1, app2) < 0);
       //different clusterId
       assertTrue(appComparator.compare(app1, app3) < 0);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39ca541/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/MockSchedulableEntity.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/MockSchedulableEntity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/MockSchedulableEntity.java
index fe8c455..bf4c98a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/MockSchedulableEntity.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/MockSchedulableEntity.java
@@ -31,7 +31,8 @@ public class MockSchedulableEntity implements SchedulableEntity {
   
   private String id;
   private long serial = 0;
-  
+  private Priority priority;
+
   public MockSchedulableEntity() { }
   
   public void setId(String id) {
@@ -74,5 +75,13 @@ public class MockSchedulableEntity implements SchedulableEntity {
     }
     return 1;//let other types go before this, if any
   }
-  
+
+  @Override
+  public Priority getPriority() {
+    return priority;
+  }
+
+  public void setApplicationPriority(Priority priority) {
+    this.priority = priority;
+  }
 }
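
The getPriority() hook added above is what lets an ordering policy rank entities by priority before falling back to FIFO order. A hedged sketch of that comparison logic (Prioritized is a hypothetical stand-in for SchedulableEntity, and, as in these tests, a larger priority value is assumed to mean higher priority):

    import java.util.Comparator;
    import org.apache.hadoop.yarn.api.records.Priority;

    // Hypothetical stand-in for SchedulableEntity: anything exposing a
    // priority and a FIFO serial number.
    interface Prioritized {
      Priority getPriority();
      long getSerial();
    }

    // Rank by priority first (larger value schedules earlier), then fall
    // back to submission order, mirroring a FIFO-with-priority policy.
    class PriorityFifoComparator implements Comparator<Prioritized> {
      @Override
      public int compare(Prioritized a, Prioritized b) {
        int byPriority = Integer.compare(b.getPriority().getPriority(),
            a.getPriority().getPriority());
        if (byPriority != 0) {
          return byPriority;
        }
        return Long.compare(a.getSerial(), b.getSerial());
      }
    }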

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39ca541/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
index 49c7bf9..d85e928 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
@@ -1048,13 +1048,13 @@ public class TestDelegationTokenRenewer {
     Resource resource = Records.newRecord(Resource.class);
     resource.setMemory(200);
     RMApp app1 = rm.submitApp(resource, "name", "user", null, false, null, 2,
-        credentials, null, true, false, false, null, 0, null, false);
+        credentials, null, true, false, false, null, 0, null, false, null);
     MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1);
     rm.waitForState(app1.getApplicationId(), RMAppState.RUNNING);
 
     // submit app2 with the same token, set cancelTokenWhenComplete to true;
     RMApp app2 = rm.submitApp(resource, "name", "user", null, false, null, 2,
-        credentials, null, true, false, false, null, 0, null, true);
+        credentials, null, true, false, false, null, 0, null, true, null);
     MockAM am2 = MockRM.launchAndRegisterAM(app2, rm, nm1);
     rm.waitForState(app2.getApplicationId(), RMAppState.RUNNING);
     MockRM.finishAMAndVerifyAppState(app2, rm, nm1, am2);
@@ -1114,7 +1114,7 @@ public class TestDelegationTokenRenewer {
     resource.setMemory(200);
     RMApp app1 =
         rm.submitApp(resource, "name", "user", null, false, null, 2, credentials,
-          null, true, false, false, null, 0, null, true);
+          null, true, false, false, null, 0, null, true, null);
     MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1);
     rm.waitForState(app1.getApplicationId(), RMAppState.RUNNING);
 
@@ -1122,7 +1122,7 @@ public class TestDelegationTokenRenewer {
     Assert.assertNotNull(dttr);
     Assert.assertTrue(dttr.referringAppIds.contains(app1.getApplicationId()));
     RMApp app2 = rm.submitApp(resource, "name", "user", null, false, null, 2,
-        credentials, null, true, false, false, null, 0, null, true);
+        credentials, null, true, false, false, null, 0, null, true, null);
     MockAM am2 = MockRM.launchAndRegisterAM(app2, rm, nm1);
     rm.waitForState(app2.getApplicationId(), RMAppState.RUNNING);
     Assert.assertTrue(renewer.getAllTokens().containsKey(token1));
@@ -1139,7 +1139,7 @@ public class TestDelegationTokenRenewer {
     Assert.assertFalse(Renewer.cancelled);
 
     RMApp app3 = rm.submitApp(resource, "name", "user", null, false, null, 2,
-        credentials, null, true, false, false, null, 0, null, true);
+        credentials, null, true, false, false, null, 0, null, true, null);
     MockAM am3 = MockRM.launchAndRegisterAM(app3, rm, nm1);
     rm.waitForState(app3.getApplicationId(), RMAppState.RUNNING);
     Assert.assertTrue(renewer.getAllTokens().containsKey(token1));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c39ca541/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsModification.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsModification.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsModification.java
index 8e5e601..de4d116 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsModification.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsModification.java
@@ -759,10 +759,10 @@ public class TestRMWebServicesAppsModification extends JerseyTestBase {
     ApplicationSubmissionContextInfo appInfo = new ApplicationSubmissionContextInfo();
     appInfo.setApplicationId(appId);
     appInfo.setApplicationName(appName);
-    appInfo.setPriority(3);
     appInfo.setMaxAppAttempts(2);
     appInfo.setQueue(queueName);
     appInfo.setApplicationType(appType);
+    appInfo.setPriority(0);
     HashMap<String, LocalResourceInfo> lr =  new HashMap<>();
     LocalResourceInfo y = new LocalResourceInfo();
     y.setUrl(new URI("http://www.test.com/file.txt"));


[18/19] hadoop git commit: YARN-3261. rewrite resourcemanager restart doc to remove roadmap bits (Gururaj Shetty via aw)

Posted by ae...@apache.org.
YARN-3261. rewrite resourcemanager restart doc to remove roadmap bits (Gururaj Shetty via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b7ffc4f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b7ffc4f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b7ffc4f

Branch: refs/heads/HDFS-7240
Commit: 3b7ffc4f3f0ffb0fa6c324da6d88803f5b233832
Parents: c39ca54
Author: Allen Wittenauer <aw...@apache.org>
Authored: Tue Jul 21 10:00:20 2015 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Tue Jul 21 10:00:34 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  2 ++
 .../src/site/markdown/ResourceManagerRestart.md | 32 +++++++++-----------
 2 files changed, 16 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b7ffc4f/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 7259cf2..79e9ae2 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -29,6 +29,8 @@ Trunk - Unreleased
     YARN-2280. Resource manager web service fields are not accessible
     (Krisztian Horvath via aw)
 
+    YARN-3261. rewrite resourcemanager restart doc to remove roadmap bits (Gururaj Shetty via aw)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b7ffc4f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRestart.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRestart.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRestart.md
index d23505d..ee222c7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRestart.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRestart.md
@@ -31,34 +31,30 @@ ResourceManger Restart
 Overview
 --------
 
-ResourceManager is the central authority that manages resources and schedules applications running atop of YARN. Hence, it is potentially a single point of failure in a Apache YARN cluster.
-`
-This document gives an overview of ResourceManager Restart, a feature that enhances ResourceManager to keep functioning across restarts and also makes ResourceManager down-time invisible to end-users.
+ResourceManager is the central authority that manages resources and schedules applications running on YARN. Hence, it is potentially a single point of failure in an Apache YARN cluster. This document gives an overview of ResourceManager Restart, a feature that enhances ResourceManager to keep functioning across restarts and also makes ResourceManager down-time invisible to end-users.
 
-ResourceManager Restart feature is divided into two phases: 
+There are two types of restart for ResourceManager:
 
-* **ResourceManager Restart Phase 1 (Non-work-preserving RM restart)**: Enhance RM to persist application/attempt state and other credentials information in a pluggable state-store. RM will reload this information from state-store upon restart and re-kick the previously running applications. Users are not required to re-submit the applications.
+* **Non-work-preserving RM restart**: This restart enhances RM to persist application/attempt state and other credential information in a pluggable state-store. RM will reload this information from the state-store on restart and re-kick the previously running applications. Users are not required to re-submit the applications.
 
-* **ResourceManager Restart Phase 2 (Work-preserving RM restart)**: Focus on re-constructing the running state of ResourceManager by combining the container statuses from NodeManagers and container requests from ApplicationMasters upon restart. The key difference from phase 1 is that previously running applications will not be killed after RM restarts, and so applications won't lose its work because of RM outage.
+* **Work-preserving RM restart**: This focuses on re-constructing the running state of RM by combining the container status from NodeManagers and container requests from ApplicationMasters on restart. The key difference from non-work-preserving RM restart is that previously running applications will not be killed after RM restarts, and so applications will not lose their work because of an RM outage.
 
 Feature
 -------
 
-* **Phase 1: Non-work-preserving RM restart** 
+* **Non-work-preserving RM restart**
 
-     As of Hadoop 2.4.0 release, only ResourceManager Restart Phase 1 is implemented which is described below.
+     In non-work-preserving RM restart, RM will save the application metadata (i.e. ApplicationSubmissionContext) in a pluggable state-store when a client submits an application, and also saves the final status of the application, such as the completion state (failed, killed, or finished) and diagnostics, when the application completes. Besides, RM also saves credentials such as security keys and tokens to work in a secure environment. When RM shuts down, as long as the required information (i.e. application metadata and the accompanying credentials if running in a secure environment) is available in the state-store, then on restart RM can pick up the application metadata from the state-store and re-submit the application. RM won't re-submit the applications if they were already completed (i.e. failed, killed, or finished) before RM went down.
 
-     The overall concept is that RM will persist the application metadata (i.e. ApplicationSubmissionContext) in a pluggable state-store when client submits an application and also saves the final status of the application such as the completion state (failed, killed, finished) and diagnostics when the application completes. Besides, RM also saves the credentials like security keys, tokens to work in a secure  environment. Any time RM shuts down, as long as the required information (i.e.application metadata and the alongside credentials if running in a secure environment) is available in the state-store, when RM restarts, it can pick up the application metadata from the state-store and re-submit the application. RM won't re-submit the applications if they were already completed (i.e. failed, killed, finished) before RM went down.
+     NodeManagers and clients during the down-time of RM will keep polling RM until RM comes up. When RM comes up, it will send a re-sync command to all the NodeManagers and ApplicationMasters it was talking to via heartbeats. The NMs will kill all their managed containers and re-register with RM. These re-registered NodeManagers are similar to the newly joining NMs. AMs (e.g. MapReduce AM) are expected to shut down when they receive the re-sync command. After RM restarts and loads all the application metadata and credentials from the state-store and populates them into memory, it will create a new attempt (i.e. ApplicationMaster) for each application that was not yet completed and re-kick that application as usual. As described before, the previously running applications' work is lost in this manner since they are essentially killed by RM via the re-sync command on restart.
 
-     NodeManagers and clients during the down-time of RM will keep polling RM until RM comes up. When RM becomes alive, it will send a re-sync command to all the NodeManagers and ApplicationMasters it was talking to via heartbeats. As of Hadoop 2.4.0 release, the behaviors for NodeManagers and ApplicationMasters to handle this command are: NMs will kill all its managed containers and re-register with RM. From the RM's perspective, these re-registered NodeManagers are similar to the newly joining NMs. AMs(e.g. MapReduce AM) are expected to shutdown when they receive the re-sync command. After RM restarts and loads all the application metadata, credentials from state-store and populates them into memory, it will create a new attempt (i.e. ApplicationMaster) for each application that was not yet completed and re-kick that application as usual. As described before, the previously running applications' work is lost in this manner since they are essentially killed by RM via the re-sync command on restart.
 
-* **Phase 2: Work-preserving RM restart** 
+* **Work-preserving RM restart**
 
-     As of Hadoop 2.6.0, we further enhanced RM restart feature to address the problem to not kill any applications running on YARN cluster if RM restarts.
+     In work-preserving RM restart, RM ensures the persistence of application state and reloads that state on recovery. This restart primarily focuses on re-constructing the entire running state of the YARN cluster, the majority of which is the state of the central scheduler inside RM, which keeps track of all containers' life-cycle, applications' headroom and resource requests, queues' resource usage and so on. In this way, RM need not kill the AM and re-run the application from scratch as is done in non-work-preserving RM restart. Applications can simply re-sync back with RM and resume from where they left off.
 
-     Beyond all the groundwork that has been done in Phase 1 to ensure the persistency of application state and reload that state on recovery, Phase 2 primarily focuses on re-constructing the entire running state of YARN cluster, the majority of which is the state of the central scheduler inside RM which keeps track of all containers' life-cycle, applications' headroom and resource requests, queues' resource usage etc. In this way, RM doesn't need to kill the AM and re-run the application from scratch as it is done in Phase 1. Applications can simply re-sync back with RM and resume from where it were left off.
+     RM recovers its running state by taking advantage of the container status sent from all NMs. NM will not kill the containers when it re-syncs with the restarted RM. It continues managing the containers and sends the container status across to RM when it re-registers. RM reconstructs the container instances and the associated applications' scheduling status by absorbing these containers' information. In the meantime, AM needs to re-send the outstanding resource requests to RM because RM may lose the unfulfilled requests when it shuts down. Application writers using the AMRMClient library to communicate with RM do not need to worry about the part of AM re-sending resource requests to RM on re-sync, as it is automatically taken care of by the library itself.
 
-     RM recovers its runing state by taking advantage of the container statuses sent from all NMs. NM will not kill the containers when it re-syncs with the restarted RM. It continues managing the containers and send the container statuses across to RM when it re-registers. RM reconstructs the container instances and the associated applications' scheduling status by absorbing these containers' information. In the meantime, AM needs to re-send the outstanding resource requests to RM because RM may lose the unfulfilled requests when it shuts down. Application writers using AMRMClient library to communicate with RM do not need to worry about the part of AM re-sending resource requests to RM on re-sync, as it is automatically taken care by the library itself.
 
 Configurations
 --------------
@@ -103,7 +99,7 @@ This section describes the configurations involved to enable RM Restart feature.
 | `yarn.resourcemanager.fs.state-store.retry-policy-spec` | Hadoop FileSystem client retry policy specification. Hadoop FileSystem client retry is always enabled. Specified in pairs of sleep-time and number-of-retries i.e. (t0, n0), (t1, n1), ..., the first n0 retries sleep t0 milliseconds on average, the following n1 retries sleep t1 milliseconds on average, and so on. Default value is (2000, 500) |
 
 ### Configurations for ZooKeeper based state-store implementation
-  
+
 * Configure the ZooKeeper server address and the root path where the RM state is stored.
 
 | Property | Description |
@@ -145,7 +141,7 @@ ContainerId string format is changed if RM restarts with work-preserving recover
 
 It is now changed to:
 `Container_`**e{epoch}**`_{clusterTimestamp}_{appId}_{attemptId}_{containerId}`, e.g. `Container_`**e17**`_1410901177871_0001_01_000005`.
- 
+
 Here, the additional epoch number is a monotonically increasing integer which starts from 0 and is increased by 1 each time RM restarts. If epoch number is 0, it is omitted and the containerId string format stays the same as before.
 
 Sample Configurations
@@ -155,12 +151,12 @@ Below is a minimum set of configurations for enabling RM work-preserving restart
 
 
      <property>
-       <description>Enable RM to recover state after starting. If true, then 
+       <description>Enable RM to recover state after starting. If true, then
        yarn.resourcemanager.store.class must be specified</description>
        <name>yarn.resourcemanager.recovery.enabled</name>
        <value>true</value>
      </property>
-   
+
      <property>
        <description>The class to use as the persistent store.</description>
        <name>yarn.resourcemanager.store.class</name>
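
Condensing the rewritten doc above: work-preserving restart needs recovery switched on, a store class, and (for the ZooKeeper store) a quorum address. A hedged programmatic sketch of the same three settings (the ZooKeeper addresses are placeholders; in practice these keys live in yarn-site.xml as the doc's sample shows):

    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class RmRecoveryConfigDemo {
      public static void main(String[] args) {
        YarnConfiguration conf = new YarnConfiguration();
        // Turn on RM state recovery; a store class must then be specified.
        conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
        // ZooKeeper-backed state-store.
        conf.set(YarnConfiguration.RM_STORE,
            "org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore");
        // Placeholder quorum -- replace with a real ensemble.
        conf.set(YarnConfiguration.RM_ZK_ADDRESS, "zk1:2181,zk2:2181,zk3:2181");
        System.out.println("recovery enabled: "
            + conf.getBoolean(YarnConfiguration.RECOVERY_ENABLED, false));
      }
    }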


[03/19] hadoop git commit: HADOOP-12209 Comparable type should be in FileStatus. (Yong Zhang via stevel)

Posted by ae...@apache.org.
HADOOP-12209 Comparable type should be in FileStatus.   (Yong Zhang via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9141e1aa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9141e1aa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9141e1aa

Branch: refs/heads/HDFS-7240
Commit: 9141e1aa16561e44f73e00b349735f530c94acc3
Parents: 05130e9
Author: Steve Loughran <st...@apache.org>
Authored: Mon Jul 20 12:32:32 2015 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Mon Jul 20 12:32:44 2015 +0100

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../java/org/apache/hadoop/fs/FileStatus.java   | 15 +++++--------
 .../org/apache/hadoop/fs/LocatedFileStatus.java | 10 +++------
 .../fs/viewfs/ViewFsLocatedFileStatus.java      |  3 ++-
 .../org/apache/hadoop/fs/TestFileStatus.java    | 22 ++++++++++++++++++++
 5 files changed, 35 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9141e1aa/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 481d7de..18475b9 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -972,6 +972,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12235 hadoop-openstack junit & mockito dependencies should be
     "provided". (Ted Yu via stevel)
 
+    HADOOP-12209 Comparable type should be in FileStatus.
+    (Yong Zhang via stevel)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9141e1aa/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
index 98757a7..6a79768 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.io.Writable;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
-public class FileStatus implements Writable, Comparable {
+public class FileStatus implements Writable, Comparable<FileStatus> {
 
   private Path path;
   private long length;
@@ -323,19 +323,14 @@ public class FileStatus implements Writable, Comparable {
   }
 
   /**
-   * Compare this object to another object
-   * 
-   * @param   o the object to be compared.
+   * Compare this FileStatus to another FileStatus
+   * @param   o the FileStatus to be compared.
    * @return  a negative integer, zero, or a positive integer as this object
    *   is less than, equal to, or greater than the specified object.
-   * 
-   * @throws ClassCastException if the specified object's is not of 
-   *         type FileStatus
    */
   @Override
-  public int compareTo(Object o) {
-    FileStatus other = (FileStatus)o;
-    return this.getPath().compareTo(other.getPath());
+  public int compareTo(FileStatus o) {
+    return this.getPath().compareTo(o.getPath());
   }
   
   /** Compare if this object is equal to another object

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9141e1aa/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
index 9e920c5..588fd6a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
@@ -90,17 +90,13 @@ public class LocatedFileStatus extends FileStatus {
   }
   
   /**
-   * Compare this object to another object
-   * 
-   * @param   o the object to be compared.
+   * Compare this FileStatus to another FileStatus
+   * @param   o the FileStatus to be compared.
    * @return  a negative integer, zero, or a positive integer as this object
    *   is less than, equal to, or greater than the specified object.
-   * 
-   * @throws ClassCastException if the specified object's is not of 
-   *         type FileStatus
    */
   @Override
-  public int compareTo(Object o) {
+  public int compareTo(FileStatus o) {
     return super.compareTo(o);
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9141e1aa/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsLocatedFileStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsLocatedFileStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsLocatedFileStatus.java
index 347a809..4e681a7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsLocatedFileStatus.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsLocatedFileStatus.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.fs.viewfs;
 
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -120,7 +121,7 @@ class ViewFsLocatedFileStatus extends LocatedFileStatus {
   }
 
   @Override
-  public int compareTo(Object o) {
+  public int compareTo(FileStatus o) {
     return super.compareTo(o);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9141e1aa/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java
index 5614dd6..dd5279d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java
@@ -26,6 +26,9 @@ import java.io.DataInputStream;
 import java.io.DataOutput;
 import java.io.DataOutputStream;
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
 
 import org.junit.Test;
 import org.apache.commons.logging.Log;
@@ -183,6 +186,25 @@ public class TestFileStatus {
     validateToString(fileStatus);
   }
   
+  @Test
+  public void testCompareTo() throws IOException {
+    Path path1 = new Path("path1");
+    Path path2 = new Path("path2");
+    FileStatus fileStatus1 =
+        new FileStatus(1, true, 1, 1, 1, 1, FsPermission.valueOf("-rw-rw-rw-"),
+            "one", "one", null, path1);
+    FileStatus fileStatus2 =
+        new FileStatus(1, true, 1, 1, 1, 1, FsPermission.valueOf("-rw-rw-rw-"),
+            "one", "one", null, path2);
+    assertTrue(fileStatus1.compareTo(fileStatus2) < 0);
+    assertTrue(fileStatus2.compareTo(fileStatus1) > 0);
+
+    List<FileStatus> statList = new ArrayList<>();
+    statList.add(fileStatus1);
+    statList.add(fileStatus2);
+    assertTrue(Collections.binarySearch(statList, fileStatus1) > -1);
+  }
+
   /**
    * Check that toString produces the expected output for a symlink.
    */
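
A practical consequence of typing Comparable as above: FileStatus arrays and lists can now be sorted and binary-searched without raw-type warnings or casts. A small sketch, assuming default filesystem configuration (the /tmp path is a placeholder):

    import java.util.Arrays;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class SortListingDemo {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        FileStatus[] listing = fs.listStatus(new Path("/tmp")); // placeholder
        // compareTo(FileStatus) orders by path, so this sorts the listing
        // by path with no unchecked casts.
        Arrays.sort(listing);
        for (FileStatus stat : listing) {
          System.out.println(stat.getPath());
        }
      }
    }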


[02/19] hadoop git commit: HADOOP-12235 hadoop-openstack junit & mockito dependencies should be "provided". (Ted Yu via stevel)

Posted by ae...@apache.org.
HADOOP-12235 hadoop-openstack junit & mockito dependencies should be "provided". (Ted Yu via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/05130e94
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/05130e94
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/05130e94

Branch: refs/heads/HDFS-7240
Commit: 05130e94c5223a8ed70a7fb5d1398e5d536f5f03
Parents: 176131f
Author: Steve Loughran <st...@apache.org>
Authored: Mon Jul 20 11:22:22 2015 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Mon Jul 20 11:22:39 2015 +0100

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
 hadoop-tools/hadoop-openstack/pom.xml           | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/05130e94/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 62703c3..481d7de 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -969,6 +969,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12240. Fix tests requiring native library to be skipped in non-native
     profile. (Masatake Iwasaki via ozawa)
 
+    HADOOP-12235 hadoop-openstack junit & mockito dependencies should be
+    "provided". (Ted Yu via stevel)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/05130e94/hadoop-tools/hadoop-openstack/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-openstack/pom.xml b/hadoop-tools/hadoop-openstack/pom.xml
index afdda99..1b541e2 100644
--- a/hadoop-tools/hadoop-openstack/pom.xml
+++ b/hadoop-tools/hadoop-openstack/pom.xml
@@ -128,12 +128,12 @@
     <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
-      <scope>compile</scope>
+      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>org.mockito</groupId>
       <artifactId>mockito-all</artifactId>
-      <scope>compile</scope>
+      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>com.google.guava</groupId>


[07/19] hadoop git commit: HDFS-8344. NameNode doesn't recover lease for files with missing blocks (raviprak)

Posted by ae...@apache.org.
HDFS-8344. NameNode doesn't recover lease for files with missing blocks (raviprak)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e4f75626
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e4f75626
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e4f75626

Branch: refs/heads/HDFS-7240
Commit: e4f756260f16156179ba4adad974ec92279c2fac
Parents: 98c2bc8
Author: Ravi Prakash <ra...@altiscale.com>
Authored: Mon Jul 20 14:03:34 2015 -0700
Committer: Ravi Prakash <ra...@altiscale.com>
Committed: Mon Jul 20 14:03:34 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  3 +
 .../BlockInfoUnderConstruction.java             | 19 ++++-
 .../server/blockmanagement/BlockManager.java    | 14 +++-
 .../hdfs/server/namenode/FSNamesystem.java      | 10 +++
 .../src/main/resources/hdfs-default.xml         |  9 +++
 .../apache/hadoop/hdfs/TestLeaseRecovery.java   | 78 ++++++++++++++++++++
 7 files changed, 132 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f75626/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 58491a6..13d9969 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1050,6 +1050,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8778. TestBlockReportRateLimiting#testLeaseExpiration can deadlock.
     (Arpit Agarwal)
 
+    HDFS-8344. NameNode doesn't recover lease for files with missing blocks
+    (raviprak)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f75626/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 0e569f0..210d1e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -440,6 +440,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final long    DFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT = 10 * 1000;
   public static final String  DFS_BLOCK_INVALIDATE_LIMIT_KEY = "dfs.block.invalidate.limit";
   public static final int     DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT = 1000;
+  public static final String  DFS_BLOCK_UC_MAX_RECOVERY_ATTEMPTS = "dfs.block.uc.max.recovery.attempts";
+  public static final int     DFS_BLOCK_UC_MAX_RECOVERY_ATTEMPTS_DEFAULT = 5;
+
   public static final String  DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED_KEY = "dfs.corruptfilesreturned.max";
   public static final int     DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED = 500;
   /* Maximum number of blocks to process for initializing replication queues */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f75626/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
index 9cd3987..28f1633 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 
@@ -61,6 +60,11 @@ public abstract class BlockInfoUnderConstruction extends BlockInfo {
    */
   protected Block truncateBlock;
 
+  /** The number of times all replicas will be used to attempt recovery before
+   * giving up and marking the block under construction missing.
+   */
+  private int recoveryAttemptsBeforeMarkingBlockMissing;
+
   /**
    * ReplicaUnderConstruction contains information about replicas while
    * they are under construction.
@@ -174,6 +178,8 @@ public abstract class BlockInfoUnderConstruction extends BlockInfo {
         "BlockInfoUnderConstruction cannot be in COMPLETE state");
     this.blockUCState = state;
     setExpectedLocations(targets);
+    this.recoveryAttemptsBeforeMarkingBlockMissing =
+      BlockManager.getMaxBlockUCRecoveries();
   }
 
   /** Set expected locations. */
@@ -271,7 +277,7 @@ public abstract class BlockInfoUnderConstruction extends BlockInfo {
     if (replicas.size() == 0) {
       NameNode.blockStateChangeLog.warn("BLOCK* " +
           "BlockInfoUnderConstruction.initLeaseRecovery: " +
-          "No blocks found, lease removed.");
+          "No replicas found.");
     }
     boolean allLiveReplicasTriedAsPrimary = true;
     for (int i = 0; i < replicas.size(); i++) {
@@ -283,6 +289,11 @@ public abstract class BlockInfoUnderConstruction extends BlockInfo {
       }
     }
     if (allLiveReplicasTriedAsPrimary) {
+      recoveryAttemptsBeforeMarkingBlockMissing--;
+      NameNode.blockStateChangeLog.info("Tried to recover " + this +" using all"
+          + " replicas. Will try " + recoveryAttemptsBeforeMarkingBlockMissing
+          + " more times");
+
       // Just set all the replicas to be chosen whether they are alive or not.
       for (int i = 0; i < replicas.size(); i++) {
         replicas.get(i).setChosenAsPrimary(false);
@@ -341,6 +352,10 @@ public abstract class BlockInfoUnderConstruction extends BlockInfo {
     replicas.add(new ReplicaUnderConstruction(block, storage, rState));
   }
 
+  public int getNumRecoveryAttemptsLeft() {
+    return recoveryAttemptsBeforeMarkingBlockMissing;
+  }
+
   /**
    * Convert an under construction block to a complete block.
    *

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f75626/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 7dce2a8..16dfe45 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
 import static org.apache.hadoop.util.ExitUtil.terminate;
 
 import java.io.IOException;
@@ -274,6 +273,13 @@ public class BlockManager implements BlockStatsMXBean {
   private BlockPlacementPolicy blockplacement;
   private final BlockStoragePolicySuite storagePolicySuite;
 
+  /** The number of times a block under construction's recovery will be
+    * attempted using all known replicas. e.g. if there are 3 replicas, each
+    * node will be tried 5 times (for a total of 15 retries across all nodes)*/
+  private static int maxBlockUCRecoveries =
+    DFSConfigKeys.DFS_BLOCK_UC_MAX_RECOVERY_ATTEMPTS_DEFAULT;
+  public static int getMaxBlockUCRecoveries() { return maxBlockUCRecoveries; }
+
   /** Check whether name system is running before terminating */
   private boolean checkNSRunning = true;
 
@@ -282,6 +288,9 @@ public class BlockManager implements BlockStatsMXBean {
     this.namesystem = namesystem;
     datanodeManager = new DatanodeManager(this, namesystem, conf);
     heartbeatManager = datanodeManager.getHeartbeatManager();
+    maxBlockUCRecoveries = conf.getInt(
+      DFSConfigKeys.DFS_BLOCK_UC_MAX_RECOVERY_ATTEMPTS,
+      DFSConfigKeys.DFS_BLOCK_UC_MAX_RECOVERY_ATTEMPTS_DEFAULT);
 
     startupDelayBlockDeletionInMs = conf.getLong(
         DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,
@@ -731,7 +740,8 @@ public class BlockManager implements BlockStatsMXBean {
   /**
    * Force the given block in the given file to be marked as complete,
    * regardless of whether enough replicas are present. This is necessary
-   * when tailing edit logs as a Standby.
+   * when tailing edit logs as a Standby or when recovering a lease on a file
+   * with missing blocks.
    */
   public BlockInfo forceCompleteBlock(final BlockCollection bc,
       final BlockInfoUnderConstruction block) throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f75626/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 7c6d6a1..becd05a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3287,6 +3287,16 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
             + "Removed empty last block and closed file.");
         return true;
       }
+
+      //If the block's recovery has been attempted enough times, mark the block
+      //complete anyway and recover the lease
+      if(uc.getNumRecoveryAttemptsLeft() == 0) {
+        blockManager.forceCompleteBlock(pendingFile, uc);
+        finalizeINodeFileUnderConstruction(src, pendingFile,
+            iip.getLatestSnapshotId());
+        return true;
+      }
+
       // start recovery of the last block for this file
       long blockRecoveryId = nextGenerationStamp(blockIdManager.isLegacyBlock(uc));
       lease = reassignLease(lease, src, recoveryLeaseHolder, pendingFile);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f75626/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 8cb7d5f..dee9111 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -425,6 +425,15 @@
 </property>
 
 <property>
+  <name>dfs.block.uc.max.recovery.attempts</name>
+  <value>5</value>
+  <description>The number of times a block under construction's recovery will be
+  attempted using all known replicas. e.g. if there are 3 replicas, each node
+  will be tried 5 times (for a total of 15 retries across all nodes).
+  </description>
+</property>
+
+<property>
   <name>dfs.datanode.data.dir</name>
   <value>file://${hadoop.tmp.dir}/dfs/data</value>
   <description>Determines where on the local filesystem an DFS data node

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f75626/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
index c9f3842..c9448ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
@@ -27,6 +27,7 @@ import java.util.EnumSet;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -258,4 +259,81 @@ public class TestLeaseRecovery {
       }
     }
   }
+
+  /**
+   * Test that when a client writing to a file dies and, before the
+   * lease can be recovered, all the datanodes to which the file was written
+   * also die, the file is eventually (after 5 * the lease recovery period)
+   * closed and its lease recovered.
+   * We also check that if the datanode comes back after some time, the data
+   * originally written is not truncated.
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  @Test
+  public void testLeaseRecoveryWithMissingBlocks()
+    throws IOException, InterruptedException {
+    Configuration conf = new HdfsConfiguration();
+
+    //Start a cluster with 3 datanodes
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+    cluster.setLeasePeriod(LEASE_PERIOD, LEASE_PERIOD);
+    cluster.waitActive();
+
+    //create a file (with replication 1)
+    Path file = new Path("/testRecoveryFile");
+    DistributedFileSystem dfs = cluster.getFileSystem();
+    FSDataOutputStream out = dfs.create(file, (short) 1);
+
+    //This keeps count of the number of bytes written (AND is also the data we
+    //are writing)
+    long writtenBytes = 0;
+    while (writtenBytes < 2 * 1024 * 1024) {
+      out.writeLong(writtenBytes);
+      writtenBytes += 8;
+    }
+    System.out.println("Written " + writtenBytes + " bytes");
+    out.hsync();
+    System.out.println("hsynced the data");
+
+    //Kill the datanode to which the file was written.
+    DatanodeInfo dn =
+      ((DFSOutputStream) out.getWrappedStream()).getPipeline()[0];
+    DataNodeProperties dnStopped = cluster.stopDataNode(dn.getName());
+
+    //Wait at most 20 seconds for the lease to be recovered
+    LeaseManager lm = NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
+    int i = 40;
+    while(i-- > 0 && lm.countLease() != 0) {
+      System.out.println("Still got " + lm.countLease() + " lease(s)");
+      Thread.sleep(500);
+    }
+    assertTrue("The lease was not recovered", lm.countLease() == 0);
+    System.out.println("Got " + lm.countLease() + " leases");
+
+    //Make sure we can't read any data because the datanode is dead
+    FSDataInputStream in = dfs.open(file);
+    try {
+      in.readLong();
+      assertTrue("Shouldn't have reached here", false);
+    } catch(BlockMissingException bme) {
+      System.out.println("Correctly got BlockMissingException because datanode"
+        + " is still dead");
+    }
+
+    //Bring the dead datanode back.
+    cluster.restartDataNode(dnStopped);
+    System.out.println("Restart datanode");
+
+    //Make sure we can read all the data back (since we hsync'ed).
+    in = dfs.open(file);
+    int readBytes = 0;
+    while(in.available() != 0) {
+      assertEquals("Didn't read the data we wrote", in.readLong(), readBytes);
+      readBytes += 8;
+    }
+    assertEquals("Didn't get all the data", readBytes, writtenBytes);
+    System.out.println("Read back all the " + readBytes + " bytes");
+  }
+
 }
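
The restart half of the test hinges on hsync(): because the writer hsync'ed
before the datanode was killed, the bytes must already be on disk and must be
readable in full after the restart. A minimal sketch of that durability
contract, assuming an already-opened FileSystem and a hypothetical path:

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class HsyncSketch {
      // hsync() flushes the client buffer and syncs the replicas to disk on
      // the pipeline datanodes, so the data survives a datanode restart.
      static void writeDurably(FileSystem fs) throws IOException {
        FSDataOutputStream out = fs.create(new Path("/durable"), (short) 1);
        try {
          out.writeLong(42L);
          out.hsync();
        } finally {
          out.close();
        }
      }
    }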


[10/19] hadoop git commit: HADOOP-12081. Fix UserGroupInformation.java to support 64-bit zLinux. (aajisaka)

Posted by ae...@apache.org.
HADOOP-12081. Fix UserGroupInformation.java to support 64-bit zLinux. (aajisaka)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/773c6709
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/773c6709
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/773c6709

Branch: refs/heads/HDFS-7240
Commit: 773c670943757681feeafb227a2d0c29d48f38f1
Parents: d6d5860
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue Jul 21 11:21:49 2015 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue Jul 21 11:21:49 2015 +0900

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                   | 3 +++
 .../java/org/apache/hadoop/security/UserGroupInformation.java     | 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/773c6709/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index a23a508..ef8e238 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -705,6 +705,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-11893. Mark org.apache.hadoop.security.token.Token as
     @InterfaceAudience.Public. (Brahma Reddy Battula via stevel)
 
+    HADOOP-12081. Fix UserGroupInformation.java to support 64-bit zLinux.
+    (aajisaka)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/773c6709/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index be3d60d..80a0898 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -369,7 +369,8 @@ public class UserGroupInformation {
   private static final boolean windows =
       System.getProperty("os.name").startsWith("Windows");
   private static final boolean is64Bit =
-      System.getProperty("os.arch").contains("64");
+      System.getProperty("os.arch").contains("64") ||
+      System.getProperty("os.arch").contains("s390x");
   private static final boolean aix = System.getProperty("os.name").equals("AIX");
 
   /* Return the OS login module class name */
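
The one-line fix generalizes the 64-bit detection: on 64-bit zLinux the JVM
reports os.arch as s390x, which contains no "64", so the old check treated the
platform as 32-bit. A self-contained sketch of the resulting predicate:

    public class Arch64Check {
      public static void main(String[] args) {
        String arch = System.getProperty("os.arch");
        // s390x is 64-bit but its name lacks "64", hence the extra clause.
        boolean is64Bit = arch.contains("64") || arch.contains("s390x");
        System.out.println(arch + " -> treated as 64-bit: " + is64Bit);
      }
    }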


[16/19] hadoop git commit: YARN-3915. scmadmin help message correction (Bibin A Chundatt via aw)

Posted by ae...@apache.org.
YARN-3915. scmadmin help message correction (Bibin A Chundatt via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/da2d1ac4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/da2d1ac4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/da2d1ac4

Branch: refs/heads/HDFS-7240
Commit: da2d1ac4bc0bf0812b9a2a1ffbb7748113cdaf6d
Parents: c9507fe
Author: Allen Wittenauer <aw...@apache.org>
Authored: Tue Jul 21 09:44:45 2015 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Tue Jul 21 09:44:45 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                                    | 2 ++
 .../src/main/java/org/apache/hadoop/yarn/client/SCMAdmin.java      | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/da2d1ac4/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e6a3343..d0829c1 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -51,6 +51,8 @@ Trunk - Unreleased
     YARN-2355. MAX_APP_ATTEMPTS_ENV may no longer be a useful env var
     for a container (Darrell Taylor via aw)
 
+    YARN-3915. scmadmin help message correction (Bibin A Chundatt via aw)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da2d1ac4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/SCMAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/SCMAdmin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/SCMAdmin.java
index 1e45c5a..dc6bf48 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/SCMAdmin.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/SCMAdmin.java
@@ -52,7 +52,7 @@ public class SCMAdmin extends Configured implements Tool {
     String summary = "scmadmin is the command to execute shared cache manager" +
         "administrative commands.\n" +
         "The full syntax is: \n\n" +
-        "hadoop scmadmin" +
+        "yarn scmadmin" +
         " [-runCleanerTask]" +
         " [-help [cmd]]\n";
 
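After this one-word change the printed help matches the actual entry point:
the command is dispatched through the yarn script, for example

    yarn scmadmin -runCleanerTask
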


[13/19] hadoop git commit: HADOOP-12214. Parse 'HadoopArchive' commandline using cli Options. (Contributed by Vinayakumar B)

Posted by ae...@apache.org.
HADOOP-12214. Parse 'HadoopArchive' commandline using cli Options. (Contributed by Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/87f29c6b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/87f29c6b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/87f29c6b

Branch: refs/heads/HDFS-7240
Commit: 87f29c6b8acc07cc011713a79554d51945e265ac
Parents: df1e8ce
Author: Vinayakumar B <vi...@apache.org>
Authored: Tue Jul 21 13:12:46 2015 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Tue Jul 21 13:12:46 2015 +0530

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../org/apache/hadoop/tools/HadoopArchives.java | 87 +++++++++++++-------
 .../apache/hadoop/tools/TestHadoopArchives.java |  4 +-
 3 files changed, 64 insertions(+), 30 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/87f29c6b/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index ef8e238..24709e0 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -708,6 +708,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12081. Fix UserGroupInformation.java to support 64-bit zLinux.
     (aajisaka)
 
+    HADOOP-12214. Parse 'HadoopArchive' commandline using cli Options.
+    (vinayakumarb)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87f29c6b/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java b/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
index f00bb6d..330830b 100644
--- a/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
+++ b/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
@@ -33,6 +33,11 @@ import java.util.Random;
 import java.util.Set;
 import java.util.TreeMap;
 
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.Parser;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -81,6 +86,10 @@ public class HadoopArchives implements Tool {
   private static final Log LOG = LogFactory.getLog(HadoopArchives.class);
   
   private static final String NAME = "har"; 
+  private static final String ARCHIVE_NAME = "archiveName";
+  private static final String REPLICATION = "r";
+  private static final String PARENT_PATH = "p";
+  private static final String HELP = "help";
   static final String SRC_LIST_LABEL = NAME + ".src.list";
   static final String DST_DIR_LABEL = NAME + ".dest.path";
   static final String TMP_DIR_LABEL = NAME + ".tmp.dir";
@@ -101,9 +110,9 @@ public class HadoopArchives implements Tool {
   /** the desired replication degree; default is 10 **/
   short repl = 10;
 
-  private static final String usage = "Usage: archive"
-  + " -archiveName <NAME>.har -p <parent path> [-r <replication factor>]" +
-      "<src>* <dest>" +
+  private static final String usage = "archive"
+  + " <-archiveName <NAME>.har> <-p <parent path>> [-r <replication factor>]" +
+      " <src>* <dest>" +
   "\n";
   
  
@@ -794,7 +803,17 @@ public class HadoopArchives implements Tool {
     }
     
   }
-  
+
+  private void printUsage(Options opts, boolean printDetailed) {
+    HelpFormatter helpFormatter = new HelpFormatter();
+    if (printDetailed) {
+      helpFormatter.printHelp(usage.length() + 10, usage, null, opts, null,
+          false);
+    } else {
+      System.out.println(usage);
+    }
+  }
+
   /** the main driver for creating the archives
    *  it takes at least three command line parameters. The parent path, 
    *  The src and the dest. It does an lsr on the source paths.
@@ -804,43 +823,51 @@ public class HadoopArchives implements Tool {
 
   public int run(String[] args) throws Exception {
     try {
-      Path parentPath = null;
-      List<Path> srcPaths = new ArrayList<Path>();
-      Path destPath = null;
-      String archiveName = null;
-      if (args.length < 5) {
-        System.out.println(usage);
-        throw new IOException("Invalid usage.");
-      }
-      if (!"-archiveName".equals(args[0])) {
-        System.out.println(usage);
+      // Parse CLI options
+      Options options = new Options();
+      options.addOption(ARCHIVE_NAME, true,
+          "Name of the Archive. This is a mandatory option");
+      options.addOption(PARENT_PATH, true,
+          "Parent path of sources. This is a mandatory option");
+      options.addOption(REPLICATION, true, "Replication factor for archive files");
+      options.addOption(HELP, false, "Show the usage");
+      Parser parser = new GnuParser();
+      CommandLine commandLine = parser.parse(options, args, true);
+
+      if (commandLine.hasOption(HELP)) {
+        printUsage(options, true);
+        return 0;
+      }
+      if (!commandLine.hasOption(ARCHIVE_NAME)) {
+        printUsage(options, false);
         throw new IOException("Archive Name not specified.");
       }
-      archiveName = args[1];
+      String archiveName = commandLine.getOptionValue(ARCHIVE_NAME);
       if (!checkValidName(archiveName)) {
-        System.out.println(usage);
+        printUsage(options, false);
         throw new IOException("Invalid name for archives. " + archiveName);
       }
-      int i = 2;
       //check to see if relative parent has been provided or not
       //this is a required parameter. 
-      if (! "-p".equals(args[i])) {
-        System.out.println(usage);
+      if (!commandLine.hasOption(PARENT_PATH)) {
+        printUsage(options, false);
         throw new IOException("Parent path not specified.");
       }
-      parentPath = new Path(args[i+1]);
+      Path parentPath = new Path(commandLine.getOptionValue(PARENT_PATH));
       if (!parentPath.isAbsolute()) {
-        parentPath= parentPath.getFileSystem(getConf()).makeQualified(parentPath);
+        parentPath = parentPath.getFileSystem(getConf()).makeQualified(
+            parentPath);
       }
 
-      i+=2;
-
-      if ("-r".equals(args[i])) {
-        repl = Short.parseShort(args[i+1]);
-        i+=2;
+      if (commandLine.hasOption(REPLICATION)) {
+        repl = Short.parseShort(commandLine.getOptionValue(REPLICATION));
       }
+      // Remaining args
+      args = commandLine.getArgs();
+      List<Path> srcPaths = new ArrayList<Path>();
+      Path destPath = null;
       //read the rest of the paths
-      for (; i < args.length; i++) {
+      for (int i = 0; i < args.length; i++) {
         if (i == (args.length - 1)) {
           destPath = new Path(args[i]);
           if (!destPath.isAbsolute()) {
@@ -850,13 +877,17 @@ public class HadoopArchives implements Tool {
         else {
           Path argPath = new Path(args[i]);
           if (argPath.isAbsolute()) {
-            System.out.println(usage);
+            printUsage(options, false);
             throw new IOException("Source path " + argPath +
                 " is not relative to "+ parentPath);
           }
           srcPaths.add(new Path(parentPath, argPath));
         }
       }
+      if (destPath == null) {
+        printUsage(options, false);
+        throw new IOException("Destination path not specified.");
+      }
       if (srcPaths.size() == 0) {
         // assuming if the user does not specify path for sources
         // the whole parent directory needs to be archived. 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87f29c6b/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java b/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
index 101cb06..d8222dc 100644
--- a/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
+++ b/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
@@ -753,8 +753,8 @@ public class TestHadoopArchives {
 
     final String harName = "foo.har";
     final String fullHarPathStr = prefix + harName;
-    final String[] args = { "-archiveName", harName, "-p", inputPathStr,
-        "-r 3", "*", archivePath.toString() };
+    final String[] args = { "-archiveName", harName, "-p", inputPathStr, "-r",
+        "3", "*", archivePath.toString() };
     System.setProperty(HadoopArchives.TEST_HADOOP_ARCHIVES_JAR_PATH,
         HADOOP_ARCHIVES_JAR);
     final HadoopArchives har = new HadoopArchives(conf);
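
The test change is the visible consequence of the parser switch: with
commons-cli, an option and its value must be separate argv tokens, so the old
single token "-r 3" no longer parses. A minimal, self-contained sketch of the
parsing pattern the commit adopts (option names copied from the patch; the
wrapper class itself is illustrative):

    import java.util.Arrays;
    import org.apache.commons.cli.CommandLine;
    import org.apache.commons.cli.GnuParser;
    import org.apache.commons.cli.Options;

    public class HarCliSketch {
      public static void main(String[] args) throws Exception {
        Options options = new Options();
        options.addOption("archiveName", true, "Name of the archive");
        options.addOption("p", true, "Parent path of sources");
        options.addOption("r", true, "Replication factor");
        options.addOption("help", false, "Show the usage");
        // stopAtNonOption=true leaves the trailing <src>* <dest> for getArgs().
        CommandLine cl = new GnuParser().parse(options, args, true);
        System.out.println("archiveName = " + cl.getOptionValue("archiveName"));
        System.out.println("replication = " + cl.getOptionValue("r", "10"));
        System.out.println("remaining   = " + Arrays.toString(cl.getArgs()));
      }
    }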


[15/19] hadoop git commit: HDFS-8800. hdfs --daemon stop namenode corrupts logs (John Smith via aw)

Posted by ae...@apache.org.
HDFS-8800. hdfs --daemon stop namenode corrupts logs (John Smith via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c9507fe6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c9507fe6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c9507fe6

Branch: refs/heads/HDFS-7240
Commit: c9507fe6c12491f3aef5cd4142b4d466bd6b71c3
Parents: 29cf887b
Author: Allen Wittenauer <aw...@apache.org>
Authored: Tue Jul 21 09:41:28 2015 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Tue Jul 21 09:41:28 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt       | 2 ++
 hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs | 8 --------
 2 files changed, 2 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9507fe6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6c91c45..223baaf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -343,6 +343,8 @@ Trunk (Unreleased)
 
     HDFS-8657. Update docs for mSNN. (Jesse Yates via atm)
 
+    HDFS-8800. hdfs --daemon stop namenode corrupts logs (John Smith via aw)
+
 Release 2.8.0 - UNRELEASED
 
   NEW FEATURES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9507fe6/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index a996a80..23a08be 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -274,14 +274,6 @@ fi
 hadoop_finalize
 
 if [[ -n "${supportdaemonization}" ]]; then
-  if [[ "${COMMAND}" == "namenode" ]] &&
-     [[ "${HADOOP_DAEMON_MODE}" == "stop" ]]; then
-    hadoop_debug "Do checkpoint if necessary before stopping NameNode"
-    export CLASSPATH
-    "${JAVA}" "-Dproc_dfsadmin" ${HADOOP_OPTS} "org.apache.hadoop.hdfs.tools.DFSAdmin" "-safemode" "enter"
-    "${JAVA}" "-Dproc_dfsadmin" ${HADOOP_OPTS} "org.apache.hadoop.hdfs.tools.DFSAdmin" "-saveNamespace" "-beforeShutdown"
-    "${JAVA}" "-Dproc_dfsadmin" ${HADOOP_OPTS} "org.apache.hadoop.hdfs.tools.DFSAdmin" "-safemode" "leave"
-  fi
   if [[ -n "${secure_service}" ]]; then
     hadoop_secure_daemon_handler \
     "${HADOOP_DAEMON_MODE}" "${COMMAND}" "${CLASS}"\


[11/19] hadoop git commit: Move HDFS-6945 to 2.7.2 section in CHANGES.txt.

Posted by ae...@apache.org.
Move HDFS-6945 to 2.7.2 section in CHANGES.txt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a628f675
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a628f675
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a628f675

Branch: refs/heads/HDFS-7240
Commit: a628f675900d2533ddf86fb3d3e601238ecd68c3
Parents: 773c670
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue Jul 21 11:45:00 2015 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue Jul 21 11:45:00 2015 +0900

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a628f675/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 388b553..1293388 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -762,9 +762,6 @@ Release 2.8.0 - UNRELEASED
     HDFS-7997. The first non-existing xattr should also throw IOException.
     (zhouyingchao via yliu)
 
-    HDFS-6945. BlockManager should remove a block from excessReplicateMap and
-    decrement ExcessBlocks metric when the block is removed. (aajisaka)
-
     HDFS-7922. ShortCircuitCache#close is not releasing
     ScheduledThreadPoolExecutors (Rakesh R via Colin P. McCabe)
 
@@ -1072,8 +1069,11 @@ Release 2.7.2 - UNRELEASED
   HDFS-8722. Optimize datanode writes for small writes and flushes (kihwal)
 
   BUG FIXES
-    
-Release 2.7.1 - 2015-07-06 
+
+    HDFS-6945. BlockManager should remove a block from excessReplicateMap and
+    decrement ExcessBlocks metric when the block is removed. (aajisaka)
+
+Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES
 


[09/19] hadoop git commit: HDFS-8794. Improve CorruptReplicasMap#corruptReplicasMap. (yliu)

Posted by ae...@apache.org.
HDFS-8794. Improve CorruptReplicasMap#corruptReplicasMap. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6d58606
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6d58606
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6d58606

Branch: refs/heads/HDFS-7240
Commit: d6d58606b8adf94b208aed5fc2d054b9dd081db1
Parents: ed01dc7
Author: yliu <yl...@apache.org>
Authored: Tue Jul 21 09:20:22 2015 +0800
Committer: yliu <yl...@apache.org>
Committed: Tue Jul 21 09:20:22 2015 +0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt      |  2 ++
 .../blockmanagement/CorruptReplicasMap.java      | 19 ++++++++++++++-----
 .../blockmanagement/TestCorruptReplicaInfo.java  | 12 ++++++------
 3 files changed, 22 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d58606/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index cd32c0e..388b553 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -727,6 +727,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-7314. When the DFSClient lease cannot be renewed, abort open-for-write
     files rather than the entire DFSClient. (mingma)
 
+    HDFS-8794. Improve CorruptReplicasMap#corruptReplicasMap. (yliu)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d58606/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
index fc2e234..f83cbaf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
@@ -17,12 +17,19 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.TreeMap;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.ipc.Server;
 
-import java.util.*;
+import com.google.common.annotations.VisibleForTesting;
 
 /**
  * Stores information about all corrupt blocks in the File System.
@@ -46,8 +53,8 @@ public class CorruptReplicasMap{
     CORRUPTION_REPORTED  // client or datanode reported the corruption
   }
 
-  private final SortedMap<Block, Map<DatanodeDescriptor, Reason>> corruptReplicasMap =
-    new TreeMap<Block, Map<DatanodeDescriptor, Reason>>();
+  private final Map<Block, Map<DatanodeDescriptor, Reason>> corruptReplicasMap =
+    new HashMap<Block, Map<DatanodeDescriptor, Reason>>();
 
   /**
    * Mark the block belonging to datanode as corrupt.
@@ -181,13 +188,15 @@ public class CorruptReplicasMap{
    * @return Up to numExpectedBlocks blocks from startingBlockId if it exists
    *
    */
-  long[] getCorruptReplicaBlockIds(int numExpectedBlocks,
+  @VisibleForTesting
+  long[] getCorruptReplicaBlockIdsForTesting(int numExpectedBlocks,
                                    Long startingBlockId) {
     if (numExpectedBlocks < 0 || numExpectedBlocks > 100) {
       return null;
     }
     
-    Iterator<Block> blockIt = corruptReplicasMap.keySet().iterator();
+    Iterator<Block> blockIt = 
+        new TreeMap<>(corruptReplicasMap).keySet().iterator();
     
     // if the starting block id was specified, iterate over keys until
     // we find the matching block. If we find a matching block, break

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d58606/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
index 21fb54e..4bdaaac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
@@ -73,9 +73,9 @@ public class TestCorruptReplicaInfo {
       
       // Make sure initial values are returned correctly
       assertEquals("Number of corrupt blocks must initially be 0", 0, crm.size());
-      assertNull("Param n cannot be less than 0", crm.getCorruptReplicaBlockIds(-1, null));
-      assertNull("Param n cannot be greater than 100", crm.getCorruptReplicaBlockIds(101, null));
-      long[] l = crm.getCorruptReplicaBlockIds(0, null);
+      assertNull("Param n cannot be less than 0", crm.getCorruptReplicaBlockIdsForTesting(-1, null));
+      assertNull("Param n cannot be greater than 100", crm.getCorruptReplicaBlockIdsForTesting(101, null));
+      long[] l = crm.getCorruptReplicaBlockIdsForTesting(0, null);
       assertNotNull("n = 0 must return non-null", l);
       assertEquals("n = 0 must return an empty list", 0, l.length);
 
@@ -118,14 +118,14 @@ public class TestCorruptReplicaInfo {
       
       assertTrue("First five block ids not returned correctly ",
                 Arrays.equals(new long[]{0,1,2,3,4},
-                              crm.getCorruptReplicaBlockIds(5, null)));
+                              crm.getCorruptReplicaBlockIdsForTesting(5, null)));
                               
-      LOG.info(crm.getCorruptReplicaBlockIds(10, 7L));
+      LOG.info(crm.getCorruptReplicaBlockIdsForTesting(10, 7L));
       LOG.info(block_ids.subList(7, 18));
 
       assertTrue("10 blocks after 7 not returned correctly ",
                 Arrays.equals(new long[]{8,9,10,11,12,13,14,15,16,17},
-                              crm.getCorruptReplicaBlockIds(10, 7L)));
+                              crm.getCorruptReplicaBlockIdsForTesting(10, 7L)));
       
   }
   
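The pattern is worth calling out: the hot path keeps an unsorted HashMap, and
a sorted view is materialized only inside the rarely-called, test-only
accessor. A minimal sketch with Long keys standing in for Block (which is
Comparable in the real map):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.TreeMap;

    public class SortedViewOnDemand {
      public static void main(String[] args) {
        Map<Long, String> corrupt = new HashMap<>();  // O(1) on the hot path
        corrupt.put(3L, "c"); corrupt.put(1L, "a"); corrupt.put(2L, "b");
        // Copy into a TreeMap only when sorted iteration is actually needed.
        for (Long blockId : new TreeMap<>(corrupt).keySet()) {
          System.out.println(blockId);                // prints 1, 2, 3
        }
      }
    }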


[04/19] hadoop git commit: HADOOP-12088. KMSClientProvider uses equalsIgnoreCase("application/json"). (Brahma Reddy Battula via stevel)

Posted by ae...@apache.org.
HADOOP-12088. KMSClientProvider uses equalsIgnoreCase("application/json"). (Brahma Reddy Battula via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/05fa3368
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/05fa3368
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/05fa3368

Branch: refs/heads/HDFS-7240
Commit: 05fa3368f12d189a95a2d6cd8eebc6f7e3a719ee
Parents: 9141e1a
Author: Steve Loughran <st...@apache.org>
Authored: Mon Jul 20 13:02:51 2015 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Mon Jul 20 13:03:03 2015 +0100

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                  | 3 +++
 .../java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java | 4 +++-
 2 files changed, 6 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/05fa3368/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 18475b9..bfa9aac 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -975,6 +975,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12209 Comparable type should be in FileStatus.
     (Yong Zhang via stevel)
 
+    HADOOP-12088. KMSClientProvider uses equalsIgnoreCase("application/json").
+    (Brahma Reddy Battula via stevel)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/05fa3368/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 223e69a..1ffc44d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -544,7 +544,9 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
       // AuthenticatedURL properly to set authToken post initialization)
     }
     HttpExceptionUtils.validateResponse(conn, expectedResponse);
-    if (APPLICATION_JSON_MIME.equalsIgnoreCase(conn.getContentType())
+    if (conn.getContentType() != null
+        && conn.getContentType().trim().toLowerCase()
+            .startsWith(APPLICATION_JSON_MIME)
         && klass != null) {
       ObjectMapper mapper = new ObjectMapper();
       InputStream is = null;
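
The underlying issue is that Content-Type headers routinely carry parameters,
e.g. "application/json;charset=utf-8", so an exact equalsIgnoreCase match
silently failed and the JSON body was never parsed. A self-contained sketch
contrasting the two checks:

    public class ContentTypeCheck {
      static final String APPLICATION_JSON_MIME = "application/json";

      public static void main(String[] args) {
        String contentType = "Application/JSON;charset=UTF-8"; // typical header
        boolean before = APPLICATION_JSON_MIME.equalsIgnoreCase(contentType);
        boolean after = contentType != null
            && contentType.trim().toLowerCase().startsWith(APPLICATION_JSON_MIME);
        System.out.println("before=" + before + " after=" + after);  // before=false after=true
      }
    }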


[19/19] hadoop git commit: Merge branch 'trunk' into HDFS-7240

Posted by ae...@apache.org.
Merge branch 'trunk' into HDFS-7240


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/942e1ac2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/942e1ac2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/942e1ac2

Branch: refs/heads/HDFS-7240
Commit: 942e1ac213c86956c9f8b6fdf3c38e4dc5a84238
Parents: 8576861 3b7ffc4
Author: Anu Engineer <an...@gmail.com>
Authored: Tue Jul 21 11:14:40 2015 -0700
Committer: Anu Engineer <an...@gmail.com>
Committed: Tue Jul 21 11:14:40 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  21 ++
 .../crypto/key/kms/KMSClientProvider.java       |   4 +-
 .../java/org/apache/hadoop/fs/FileStatus.java   |  15 +-
 .../org/apache/hadoop/fs/LocatedFileStatus.java |  10 +-
 .../fs/viewfs/ViewFsLocatedFileStatus.java      |   3 +-
 .../apache/hadoop/ipc/ProtobufRpcEngine.java    |   2 +-
 .../hadoop/security/UserGroupInformation.java   |   3 +-
 .../hadoop/security/token/SecretManager.java    |   2 +-
 .../org/apache/hadoop/security/token/Token.java |   2 +-
 .../hadoop/security/token/TokenIdentifier.java  |   2 +-
 .../apache/hadoop/security/token/TokenInfo.java |   2 +-
 .../hadoop/security/token/TokenRenewer.java     |   2 +-
 .../hadoop/security/token/TokenSelector.java    |   2 +-
 .../hadoop/security/token/package-info.java     |   2 +-
 .../org/apache/hadoop/fs/TestFileStatus.java    |  22 ++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  25 +-
 .../hadoop-hdfs/src/main/bin/hdfs               |   8 -
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   3 +
 .../BlockInfoUnderConstruction.java             |  19 +-
 .../server/blockmanagement/BlockManager.java    |  14 +-
 .../blockmanagement/CorruptReplicasMap.java     |  19 +-
 .../hdfs/server/namenode/AclTransformation.java |  30 +-
 .../hdfs/server/namenode/FSNamesystem.java      |  10 +
 .../src/main/resources/hdfs-default.xml         |   9 +
 .../src/main/webapps/hdfs/dfshealth.html        |  25 ++
 .../src/main/webapps/hdfs/dfshealth.js          |   8 +
 .../markdown/HDFSHighAvailabilityWithNFS.md     |  40 ++-
 .../markdown/HDFSHighAvailabilityWithQJM.md     |  32 +-
 .../apache/hadoop/hdfs/TestLeaseRecovery.java   |  78 +++++
 .../blockmanagement/TestBlockStatsMXBean.java   |   1 -
 .../blockmanagement/TestCorruptReplicaInfo.java |  12 +-
 .../server/namenode/TestAclTransformation.java  |  55 ++-
 .../org/apache/hadoop/tools/HadoopArchives.java |  87 +++--
 .../apache/hadoop/tools/TestHadoopArchives.java |   4 +-
 hadoop-tools/hadoop-openstack/pom.xml           |   4 +-
 .../sls/scheduler/ResourceSchedulerWrapper.java |  10 +
 hadoop-yarn-project/CHANGES.txt                 |  13 +-
 .../hadoop/yarn/conf/YarnConfiguration.java     |   5 +
 .../org/apache/hadoop/yarn/client/SCMAdmin.java |   2 +-
 .../server/resourcemanager/RMAppManager.java    |  20 +-
 .../server/resourcemanager/rmapp/RMAppImpl.java |  15 +-
 .../scheduler/AbstractYarnScheduler.java        |  10 +
 .../server/resourcemanager/scheduler/Queue.java |   8 +
 .../scheduler/SchedulerApplication.java         |  22 ++
 .../scheduler/SchedulerApplicationAttempt.java  |  15 +-
 .../scheduler/YarnScheduler.java                |  20 ++
 .../scheduler/capacity/AbstractCSQueue.java     |   7 +
 .../scheduler/capacity/CapacityScheduler.java   |  73 +++-
 .../CapacitySchedulerConfiguration.java         |  13 +
 .../scheduler/capacity/LeafQueue.java           |  19 +-
 .../scheduler/common/fica/FiCaSchedulerApp.java |   8 +
 .../scheduler/event/AppAddedSchedulerEvent.java |  28 +-
 .../resourcemanager/scheduler/fair/FSQueue.java |   6 +
 .../scheduler/fifo/FifoScheduler.java           |   6 +
 .../scheduler/policy/FifoComparator.java        |  11 +-
 .../scheduler/policy/SchedulableEntity.java     |   5 +
 .../yarn/server/resourcemanager/MockRM.java     |  31 +-
 .../server/resourcemanager/TestAppManager.java  |   1 +
 .../TestWorkPreservingRMRestart.java            |   2 +-
 ...pacityPreemptionPolicyForNodePartitions.java |   1 +
 .../capacity/TestApplicationLimits.java         |   5 +-
 .../capacity/TestApplicationPriority.java       | 345 +++++++++++++++++++
 .../capacity/TestCapacityScheduler.java         |   5 +
 .../scheduler/policy/MockSchedulableEntity.java |  13 +-
 .../security/TestDelegationTokenRenewer.java    |  10 +-
 .../TestRMWebServicesAppsModification.java      |   2 +-
 .../src/site/markdown/ResourceManagerRestart.md |  32 +-
 67 files changed, 1133 insertions(+), 207 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/942e1ac2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------


[06/19] hadoop git commit: HADOOP-11893. Mark org.apache.hadoop.security.token.Token as @InterfaceAudience.Public. (Brahma Reddy Battula via stevel)

Posted by ae...@apache.org.
HADOOP-11893. Mark org.apache.hadoop.security.token.Token as @InterfaceAudience.Public. (Brahma Reddy Battula via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/98c2bc87
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/98c2bc87
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/98c2bc87

Branch: refs/heads/HDFS-7240
Commit: 98c2bc87b1445c533268c58d382ea4e4297303fd
Parents: a943142
Author: Steve Loughran <st...@apache.org>
Authored: Mon Jul 20 13:22:03 2015 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Mon Jul 20 13:22:14 2015 +0100

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                   | 3 +++
 .../main/java/org/apache/hadoop/security/token/SecretManager.java | 2 +-
 .../src/main/java/org/apache/hadoop/security/token/Token.java     | 2 +-
 .../java/org/apache/hadoop/security/token/TokenIdentifier.java    | 2 +-
 .../src/main/java/org/apache/hadoop/security/token/TokenInfo.java | 2 +-
 .../main/java/org/apache/hadoop/security/token/TokenRenewer.java  | 2 +-
 .../main/java/org/apache/hadoop/security/token/TokenSelector.java | 2 +-
 .../main/java/org/apache/hadoop/security/token/package-info.java  | 2 +-
 8 files changed, 10 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/98c2bc87/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 1b643a9..a23a508 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -702,6 +702,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12153. ByteBufferReadable doesn't declare @InterfaceAudience and
     @InterfaceStability. (Brahma Reddy Battula via ozawa)
 
+    HADOOP-11893. Mark org.apache.hadoop.security.token.Token as
+    @InterfaceAudience.Public. (Brahma Reddy Battula via stevel)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/98c2bc87/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/SecretManager.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/SecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/SecretManager.java
index 5fe0391..798c8c9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/SecretManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/SecretManager.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.ipc.StandbyException;
  * The server-side secret manager for each token type.
  * @param <T> The type of the token identifier
  */
-@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceAudience.Public
 @InterfaceStability.Evolving
 public abstract class SecretManager<T extends TokenIdentifier> {
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/98c2bc87/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
index bd254e6..2420155 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
@@ -36,7 +36,7 @@ import java.util.ServiceLoader;
 /**
  * The client-side form of the token.
  */
-@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class Token<T extends TokenIdentifier> implements Writable {
   public static final Log LOG = LogFactory.getLog(Token.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/98c2bc87/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenIdentifier.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenIdentifier.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenIdentifier.java
index ebf9d58..0b111cc 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenIdentifier.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenIdentifier.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.security.UserGroupInformation;
  * An identifier that identifies a token, may contain public information 
  * about a token, including its kind (or type).
  */
-@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceAudience.Public
 @InterfaceStability.Evolving
 public abstract class TokenIdentifier implements Writable {
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/98c2bc87/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenInfo.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenInfo.java
index 1125b7b..cc76824 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenInfo.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenInfo.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.classification.InterfaceStability;
  */
 @Retention(RetentionPolicy.RUNTIME)
 @Target(ElementType.TYPE)
-@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceAudience.Public
 @InterfaceStability.Evolving
 public @interface TokenInfo {
   /** The type of TokenSelector to be used */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/98c2bc87/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenRenewer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenRenewer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenRenewer.java
index fbd3c93..11e275f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenRenewer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenRenewer.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.io.Text;
 /**
  * This is the interface for plugins that handle tokens.
  */
-@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceAudience.Public
 @InterfaceStability.Evolving
 public abstract class TokenRenewer {
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/98c2bc87/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenSelector.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenSelector.java
index b3ec7a9..35481e4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenSelector.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenSelector.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.io.Text;
  * @param <T>
  *          T extends TokenIdentifier
  */
-@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceAudience.Public
 @InterfaceStability.Evolving
 public interface TokenSelector<T extends TokenIdentifier> {
   Token<T> selectToken(Text service,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/98c2bc87/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/package-info.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/package-info.java
index 7ee033a..e015056b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/package-info.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/package-info.java
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-@InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "MapReduce"})
+@InterfaceAudience.Public
 @InterfaceStability.Evolving
 package org.apache.hadoop.security.token;
 import org.apache.hadoop.classification.InterfaceAudience;